var/home/core/zuul-output/logs/kubelet.log
Dec 09 15:08:31 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 09 15:08:31 crc restorecon[4684]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:31 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 09 15:08:32 crc restorecon[4684]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc 
restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 09 15:08:32 crc 
restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 09 15:08:32 crc restorecon[4684]: 
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 
15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 09 15:08:32 crc 
restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 
15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 
15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 09 15:08:32 crc restorecon[4684]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 09 15:08:32 crc restorecon[4684]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
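The long restorecon run above is informational rather than an error: on typical Red Hat policies, container_file_t is listed among the SELinux "customizable" types, which restorecon skips unless forced, hence the repeated "not reset as customized by admin" wording. A minimal sketch of how one might inspect or force such a context (the path is illustrative, taken from an entry above; this is not part of the captured log):

    # Compare the policy default with what is actually on disk
    matchpathcon /var/lib/kubelet/plugins/csi-hostpath/csi.sock
    ls -Z /var/lib/kubelet/plugins/csi-hostpath/csi.sock

    # Without -F, restorecon preserves customizable/admin-customized contexts
    restorecon -Rv /var/lib/kubelet
    # With -F, it would reset them to the policy default
    restorecon -RvF /var/lib/kubelet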
Dec 09 15:08:33 crc kubenswrapper[4716]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 15:08:33 crc kubenswrapper[4716]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Dec 09 15:08:33 crc kubenswrapper[4716]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 15:08:33 crc kubenswrapper[4716]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 15:08:33 crc kubenswrapper[4716]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Dec 09 15:08:33 crc kubenswrapper[4716]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.015162 4716 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021358 4716 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021396 4716 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021401 4716 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021406 4716 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021411 4716 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021415 4716 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021419 4716 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021423 4716 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021428 4716 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021434 4716 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021440 4716 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021445 4716 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021450 4716 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021457 4716 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
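The six Flag deprecation notices above all point at the kubelet config file, and the "unrecognized feature gate" warnings are non-fatal: they name OpenShift-specific gates that the upstream kubelet gate parser does not know. A minimal sketch of where those flags would move in a KubeletConfiguration (values and the target path are illustrative, not taken from this node; this is not part of the captured log):

    cat <<'EOF' > /etc/kubernetes/kubelet-config.yaml   # hypothetical path
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock   # replaces --container-runtime-endpoint
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec   # replaces --volume-plugin-dir
    registerWithTaints:                  # replaces --register-with-taints
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    systemReserved:                      # replaces --system-reserved
      cpu: 500m
      memory: 1Gi
    evictionHard:                        # per the log, --minimum-container-ttl-duration gives way to eviction settings
      memory.available: 100Mi
    EOF
    # --pod-infra-container-image moves to the container runtime instead,
    # e.g. pause_image in crio.conf for CRI-O, as the server.go message above notes.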
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021462 4716 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021466 4716 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021470 4716 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021474 4716 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021478 4716 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021482 4716 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021485 4716 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021489 4716 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021493 4716 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021496 4716 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021500 4716 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021504 4716 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021510 4716 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021514 4716 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021518 4716 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021521 4716 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021531 4716 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021535 4716 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021539 4716 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021543 4716 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021547 4716 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021550 4716 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021554 4716 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021558 4716 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021562 4716 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021567 4716 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021571 4716 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021576 4716 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021580 4716 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021585 4716 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021589 4716 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021597 4716 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021602 4716 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021607 4716 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021611 4716 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021615 4716 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021634 4716 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021639 4716 feature_gate.go:330] unrecognized feature gate: Example
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021642 4716 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021646 4716 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021650 4716 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021654 4716 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021658 4716 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021661 4716 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021667 4716 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021671 4716 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021675 4716 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021679 4716 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021683 4716 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021689 4716 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021694 4716 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021698 4716 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021702 4716 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021706 4716 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021710 4716 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021714 4716 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.021717 4716 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021813 4716 flags.go:64] FLAG: --address="0.0.0.0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021824 4716 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021831 4716 flags.go:64] FLAG: --anonymous-auth="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021837 4716 flags.go:64] FLAG: --application-metrics-count-limit="100"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021843 4716 flags.go:64] FLAG: --authentication-token-webhook="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021847 4716 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021853 4716 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021859 4716 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021864 4716 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021870 4716 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021875 4716 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021881 4716 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021886 4716 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021891 4716 flags.go:64] FLAG: --cgroup-root=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021896 4716 flags.go:64] FLAG: --cgroups-per-qos="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021901 4716 flags.go:64] FLAG: --client-ca-file=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021906 4716 flags.go:64] FLAG: --cloud-config=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021910 4716 flags.go:64] FLAG: --cloud-provider=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021914 4716 flags.go:64] FLAG: --cluster-dns="[]"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021920 4716 flags.go:64] FLAG: --cluster-domain=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021924 4716 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021929 4716 flags.go:64] FLAG: --config-dir=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021933 4716 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021937 4716 flags.go:64] FLAG: --container-log-max-files="5"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021943 4716 flags.go:64] FLAG: --container-log-max-size="10Mi"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021948 4716 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021952 4716 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021957 4716 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021961 4716 flags.go:64] FLAG: --contention-profiling="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021966 4716 flags.go:64] FLAG: --cpu-cfs-quota="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021970 4716 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021974 4716 flags.go:64] FLAG: --cpu-manager-policy="none"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021978 4716 flags.go:64] FLAG: --cpu-manager-policy-options=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021984 4716 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021988 4716 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021992 4716 flags.go:64] FLAG: --enable-debugging-handlers="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.021996 4716 flags.go:64] FLAG: --enable-load-reader="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022000 4716 flags.go:64] FLAG: --enable-server="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022006 4716 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022013 4716 flags.go:64] FLAG: --event-burst="100"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022018 4716 flags.go:64] FLAG: --event-qps="50"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022022 4716 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022026 4716 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022030 4716 flags.go:64] FLAG: --eviction-hard=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022039 4716 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022043 4716 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022048 4716 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022053 4716 flags.go:64] FLAG: --eviction-soft=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022057 4716 flags.go:64] FLAG: --eviction-soft-grace-period=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022062 4716 flags.go:64] FLAG: --exit-on-lock-contention="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022066 4716 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022070 4716 flags.go:64] FLAG: --experimental-mounter-path=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022074 4716 flags.go:64] FLAG: --fail-cgroupv1="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022078 4716 flags.go:64] FLAG: --fail-swap-on="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022082 4716 flags.go:64] FLAG: --feature-gates=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022088 4716 flags.go:64] FLAG: --file-check-frequency="20s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022093 4716 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022097 4716 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022102 4716 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022106 4716 flags.go:64] FLAG: --healthz-port="10248"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022110 4716 flags.go:64] FLAG: --help="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022114 4716 flags.go:64] FLAG: --hostname-override=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022119 4716 flags.go:64] FLAG: --housekeeping-interval="10s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022123 4716 flags.go:64] FLAG: --http-check-frequency="20s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022127 4716 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022132 4716 flags.go:64] FLAG: --image-credential-provider-config=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022135 4716 flags.go:64] FLAG: --image-gc-high-threshold="85"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022140 4716 flags.go:64] FLAG: --image-gc-low-threshold="80"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022144 4716 flags.go:64] FLAG: --image-service-endpoint=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022148 4716 flags.go:64] FLAG: --kernel-memcg-notification="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022152 4716 flags.go:64] FLAG: --kube-api-burst="100"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022157 4716 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022162 4716 flags.go:64] FLAG: --kube-api-qps="50"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022166 4716 flags.go:64] FLAG: --kube-reserved=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022170 4716 flags.go:64] FLAG: --kube-reserved-cgroup=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022174 4716 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022179 4716 flags.go:64] FLAG: --kubelet-cgroups=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022194 4716 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022199 4716 flags.go:64] FLAG: --lock-file=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022203 4716 flags.go:64] FLAG: --log-cadvisor-usage="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022207 4716 flags.go:64] FLAG: --log-flush-frequency="5s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022211 4716 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022222 4716 flags.go:64] FLAG: --log-json-split-stream="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022227 4716 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022231 4716 flags.go:64] FLAG: --log-text-split-stream="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022235 4716 flags.go:64] FLAG: --logging-format="text"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022239 4716 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022244 4716 flags.go:64] FLAG: --make-iptables-util-chains="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022249 4716 flags.go:64] FLAG: --manifest-url=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022254 4716 flags.go:64] FLAG: --manifest-url-header=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022259 4716 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022264 4716 flags.go:64] FLAG: --max-open-files="1000000"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022269 4716 flags.go:64] FLAG: --max-pods="110"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022273 4716 flags.go:64] FLAG: --maximum-dead-containers="-1"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022277 4716 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022281 4716 flags.go:64] FLAG: --memory-manager-policy="None"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022286 4716 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022290 4716 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022294 4716 flags.go:64] FLAG: --node-ip="192.168.126.11"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022299 4716 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022310 4716 flags.go:64] FLAG: --node-status-max-images="50"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022316 4716 flags.go:64] FLAG: --node-status-update-frequency="10s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022321 4716 flags.go:64] FLAG: --oom-score-adj="-999"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022326 4716 flags.go:64] FLAG: --pod-cidr=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022330 4716 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022340 4716 flags.go:64] FLAG: --pod-manifest-path=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022345 4716 flags.go:64] FLAG: --pod-max-pids="-1"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022350 4716 flags.go:64] FLAG: --pods-per-core="0"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022355 4716 flags.go:64] FLAG: --port="10250"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022360 4716 flags.go:64] FLAG: --protect-kernel-defaults="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022366 4716 flags.go:64] FLAG: --provider-id=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022370 4716 flags.go:64] FLAG: --qos-reserved=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022375 4716 flags.go:64] FLAG: --read-only-port="10255"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022379 4716 flags.go:64] FLAG: --register-node="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022384 4716 flags.go:64] FLAG: --register-schedulable="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022388 4716 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022396 4716 flags.go:64] FLAG: --registry-burst="10"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022400 4716 flags.go:64] FLAG: --registry-qps="5"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022405 4716 flags.go:64] FLAG: --reserved-cpus=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022409 4716 flags.go:64] FLAG: --reserved-memory=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022417 4716 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022422 4716 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022426 4716 flags.go:64] FLAG: --rotate-certificates="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022431 4716 flags.go:64] FLAG: --rotate-server-certificates="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022435 4716 flags.go:64] FLAG: --runonce="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022439 4716 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022444 4716 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022449 4716 flags.go:64] FLAG: --seccomp-default="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022453 4716 flags.go:64] FLAG: --serialize-image-pulls="true"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022457 4716 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022461 4716 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022466 4716 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022471 4716 flags.go:64] FLAG: --storage-driver-password="root"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022475 4716 flags.go:64] FLAG: --storage-driver-secure="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022479 4716 flags.go:64] FLAG: --storage-driver-table="stats"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022483 4716 flags.go:64] FLAG: --storage-driver-user="root"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022487 4716 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022492 4716 flags.go:64] FLAG: --sync-frequency="1m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022497 4716 flags.go:64] FLAG: --system-cgroups=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022501 4716 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022508 4716 flags.go:64] FLAG: --system-reserved-cgroup=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022512 4716 flags.go:64] FLAG: --tls-cert-file=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022518 4716 flags.go:64] FLAG: --tls-cipher-suites="[]"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022524 4716 flags.go:64] FLAG: --tls-min-version=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022528 4716 flags.go:64] FLAG: --tls-private-key-file=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022532 4716 flags.go:64] FLAG: --topology-manager-policy="none"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022537 4716 flags.go:64] FLAG: --topology-manager-policy-options=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022541 4716 flags.go:64] FLAG: --topology-manager-scope="container"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022546 4716 flags.go:64] FLAG: --v="2"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022551 4716 flags.go:64] FLAG: --version="false"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022557 4716 flags.go:64] FLAG: --vmodule=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022562 4716 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.022568 4716 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.023035 4716 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.033001 4716 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.033051 4716 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.034053 4716 server.go:940] "Client rotation is on, will bootstrap in background"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.036802 4716 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.036891 4716 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.038271 4716 server.go:997] "Starting client certificate rotation"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.038315 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.038495 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-06 10:55:56.330110533 +0000 UTC
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.038606 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.046177 4716 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.048800 4716 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.050101 4716 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.062117 4716 log.go:25] "Validated CRI v1 runtime API"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.080037 4716 log.go:25] "Validated CRI v1 image API"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.081957 4716 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.085689 4716 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-09-15-04-28-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.085769 4716 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.111535 4716 manager.go:217] Machine: {Timestamp:2025-12-09 15:08:33.10779405 +0000 UTC m=+0.262538078 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:9da27605-fb2f-423a-b7bb-978a678a6bed BootID:7d76960a-8b61-4af0-9ff6-3ba0f6120862 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:fd:4d:6a Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:fd:4d:6a Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:c5:18:2e Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b9:2d:f9 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:d6:1b:34 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:d1:91:5b Speed:-1 Mtu:1496} {Name:eth10 MacAddress:1e:c5:03:81:b3:ec Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:2e:1c:db:42:f8:76 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.111960 4716 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.112244 4716 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.113618 4716 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.113910 4716 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.113967 4716 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.114285 4716 topology_manager.go:138] "Creating topology manager with none policy"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.114304 4716 container_manager_linux.go:303] "Creating device plugin manager"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.114505 4716 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.114552 4716 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.115023 4716 state_mem.go:36] "Initialized new in-memory state store"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.115166 4716 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.116151 4716 kubelet.go:418] "Attempting to sync node with API server"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.116188 4716 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.116260 4716 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.116285 4716 kubelet.go:324] "Adding apiserver pod source"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.116313 4716 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.118413 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.118542 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.118651 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.118795 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.118963 4716 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.119434 4716 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.120521 4716 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121269 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121296 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121305 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121314 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121328 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121341 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121350 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121365 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121375 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121385 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121419 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121430 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.121674 4716 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.122344 4716 server.go:1280] "Started kubelet"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.123085 4716 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.123241 4716 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.123128 4716 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Dec 09 15:08:33 crc systemd[1]: Started Kubernetes Kubelet.
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.125383 4716 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.125921 4716 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.66:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187f948d332b5900 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 15:08:33.122302208 +0000 UTC m=+0.277046196,LastTimestamp:2025-12-09 15:08:33.122302208 +0000 UTC m=+0.277046196,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.127157 4716 server.go:460] "Adding debug handlers to kubelet server"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.127976 4716 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.128016 4716 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.128683 4716 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.128702 4716 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.128729 4716 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 09:36:46.637654201 +0000 UTC
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.132329 4716 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 642h28m13.505342302s for next certificate rotation
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.129585 4716 volume_manager.go:287] "The desired_state_of_world populator starts"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.129398 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="200ms"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.132410 4716 volume_manager.go:289] "Starting Kubelet Volume Manager"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.130448 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.132506 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.131871 4716 factory.go:153] Registering CRI-O factory
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.132588 4716 factory.go:221] Registration of the crio container factory successfully
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.133273 4716 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.133326 4716 factory.go:55] Registering systemd factory
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.133338 4716 factory.go:221] Registration of the systemd container factory successfully
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.133382 4716 factory.go:103] Registering Raw factory
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.133396 4716 manager.go:1196] Started watching for new ooms in manager
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.134356 4716 manager.go:319] Starting recovery of all containers
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142197 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142265 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142278 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142291 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142302 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142313 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142325 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142337 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142350 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142363 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142373 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142382 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142392 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142406 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142452 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142469 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142481 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142521 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142533 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142544 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142554 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142566 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142605 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142690 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142707 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142721 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142734 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142748 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142762 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142776 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142790 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142805 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142817 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142829 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142841 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142854 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142868 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142879 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142890 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142906 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142919 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142932 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142945 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142960 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142973 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142985 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.142997 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143012 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143024 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143037 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143050 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143063 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143082 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143098 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143113 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143127 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143142 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143155 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143168 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143182 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143198 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143211 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143227 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143243 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143257 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143271 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143283 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143296 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143309 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143323 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143337 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143350 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143365 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143377 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143391 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143404 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143418 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143432 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143452 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143469 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143481 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143493 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143506 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143524 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143537 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143550 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143563 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143575 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143587 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143598 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143610 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143645 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143660 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143672 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143686 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143698 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143711 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143730 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143741 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143753 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143765 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143777 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143789 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143805 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143824 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143838 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143851 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143864 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143877 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143892 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143904 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143918 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143932 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143945 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143958 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143974 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143988 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.143999 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144010 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144022 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144034 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144084 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144098 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144118 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144131 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144143 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144160 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144178 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144191 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144203 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144215 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144229 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144241 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144255 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144336 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144348 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144361 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144372 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144384 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144397 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69"
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144409 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144420 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144432 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144444 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144455 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144467 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144479 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144496 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144509 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144522 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144534 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144546 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144558 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144570 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.144583 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145439 4716 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145468 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145484 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145498 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145513 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145525 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145538 4716 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145551 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145565 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145577 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145589 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145613 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145645 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145656 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145692 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145717 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145730 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145742 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145754 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145765 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145780 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145792 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145806 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145819 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145831 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145843 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145856 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145867 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145878 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145892 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145903 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145916 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145928 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145940 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145953 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145965 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145981 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.145993 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146006 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146019 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146032 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146044 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146056 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146069 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146081 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146094 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146106 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146119 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146132 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146147 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146161 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146173 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146188 4716 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146200 4716 reconstruct.go:97] "Volume reconstruction finished" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.146210 4716 reconciler.go:26] "Reconciler: start to sync state" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.150407 4716 manager.go:324] Recovery completed Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.162881 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.166045 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.166089 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.166105 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.167020 4716 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.167035 4716 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.167059 4716 state_mem.go:36] "Initialized new in-memory state store" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.209545 4716 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.212236 4716 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.212303 4716 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.212343 4716 kubelet.go:2335] "Starting kubelet main sync loop" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.212562 4716 policy_none.go:49] "None policy: Start" Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.212692 4716 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.213457 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.213656 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.213738 4716 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.213767 4716 state_mem.go:35] "Initializing new in-memory state store" Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.232686 4716 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.271367 4716 manager.go:334] "Starting Device Plugin manager" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.271451 4716 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.271465 4716 server.go:79] "Starting device plugin registration server" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.271998 4716 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.272018 4716 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.272245 4716 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.272328 4716 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.272339 4716 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.281763 4716 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.313342 4716 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 09 15:08:33 crc kubenswrapper[4716]: 
I1209 15:08:33.313454 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.314752 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.314804 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.314824 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.314980 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.315612 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.315760 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.315968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.316015 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.316028 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.316276 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.316418 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.316466 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.317542 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.317577 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.317591 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.317958 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.317968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.317993 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.318002 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.318008 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.318014 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.318192 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.318349 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.318419 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319424 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319448 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319460 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319553 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319594 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319588 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319639 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319800 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.319871 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.320785 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.320809 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.320820 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.320904 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.320931 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.320967 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.321337 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.321394 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.322324 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.322354 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.322366 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.333203 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="400ms" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.348773 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.348806 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.348824 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.348839 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.348898 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.348938 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349015 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349111 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349158 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349185 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349228 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349258 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349274 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349289 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.349308 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.374131 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:33 crc 
kubenswrapper[4716]: I1209 15:08:33.375435 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.375469 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.375478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.375508 4716 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.377402 4716 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.66:6443: connect: connection refused" node="crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450338 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450419 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450456 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450493 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450525 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450536 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450646 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc 
kubenswrapper[4716]: I1209 15:08:33.450656 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450559 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450706 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450817 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450850 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450860 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450918 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450936 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450952 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.450968 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451012 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451029 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451036 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451043 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451073 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451094 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451114 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451127 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451141 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451154 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" 
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451195 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451217 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.451063 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.578080 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.580001 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.580036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.580046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.580067 4716 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.580759 4716 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.66:6443: connect: connection refused" node="crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.617923 4716 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.66:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187f948d332b5900 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 15:08:33.122302208 +0000 UTC m=+0.277046196,LastTimestamp:2025-12-09 15:08:33.122302208 +0000 UTC m=+0.277046196,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.645959 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.665959 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.669567 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-11a8d53f71ff42a78498a1a31c9506df5daa4a92e8e53484041c2b8e4b2c4f6a WatchSource:0}: Error finding container 11a8d53f71ff42a78498a1a31c9506df5daa4a92e8e53484041c2b8e4b2c4f6a: Status 404 returned error can't find the container with id 11a8d53f71ff42a78498a1a31c9506df5daa4a92e8e53484041c2b8e4b2c4f6a
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.674680 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.692561 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-33087ccf457f16ca98113928e195f5dc2bdb563d10f4749424b5325e078d6b43 WatchSource:0}: Error finding container 33087ccf457f16ca98113928e195f5dc2bdb563d10f4749424b5325e078d6b43: Status 404 returned error can't find the container with id 33087ccf457f16ca98113928e195f5dc2bdb563d10f4749424b5325e078d6b43
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.694510 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-fb7a1b8399d1d019dcdd25d6acda6eeb97954947ff47a8f485f67238477816a0 WatchSource:0}: Error finding container fb7a1b8399d1d019dcdd25d6acda6eeb97954947ff47a8f485f67238477816a0: Status 404 returned error can't find the container with id fb7a1b8399d1d019dcdd25d6acda6eeb97954947ff47a8f485f67238477816a0
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.701309 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.706201 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.721085 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-8f8bd57b6652e223dce709497d292b567dd2da69494faa50eee39fcb0e10fdcf WatchSource:0}: Error finding container 8f8bd57b6652e223dce709497d292b567dd2da69494faa50eee39fcb0e10fdcf: Status 404 returned error can't find the container with id 8f8bd57b6652e223dce709497d292b567dd2da69494faa50eee39fcb0e10fdcf
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.727850 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a51e3dd655a0ea8e66f043ecbf9138f90fac8ca6f32f2b2d5ec9b9a9c7451fc3 WatchSource:0}: Error finding container a51e3dd655a0ea8e66f043ecbf9138f90fac8ca6f32f2b2d5ec9b9a9c7451fc3: Status 404 returned error can't find the container with id a51e3dd655a0ea8e66f043ecbf9138f90fac8ca6f32f2b2d5ec9b9a9c7451fc3
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.734636 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="800ms"
Dec 09 15:08:33 crc kubenswrapper[4716]: W1209 15:08:33.944599 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.944745 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.981571 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.982719 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.982759 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.982771 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:33 crc kubenswrapper[4716]: I1209 15:08:33.982799 4716 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 09 15:08:33 crc kubenswrapper[4716]: E1209 15:08:33.983223 4716 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.66:6443: connect: connection refused" node="crc"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.124708 4716 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.218098 4716 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a" exitCode=0
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.218153 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.218335 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a51e3dd655a0ea8e66f043ecbf9138f90fac8ca6f32f2b2d5ec9b9a9c7451fc3"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.218540 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.221532 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.221598 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.221612 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.223316 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.223382 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8f8bd57b6652e223dce709497d292b567dd2da69494faa50eee39fcb0e10fdcf"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.225064 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b" exitCode=0
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.225148 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.225179 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fb7a1b8399d1d019dcdd25d6acda6eeb97954947ff47a8f485f67238477816a0"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.225269 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.226179 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.226221 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.226238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.228171 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.228790 4716 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="00d7a5ce1b61692a21155afd4c4eb12d16829994e3163be6b3ba0989d5336083" exitCode=0
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.228892 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"00d7a5ce1b61692a21155afd4c4eb12d16829994e3163be6b3ba0989d5336083"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.228925 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"33087ccf457f16ca98113928e195f5dc2bdb563d10f4749424b5325e078d6b43"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.229061 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.229173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.229208 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.229224 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230054 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230097 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230114 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230788 4716 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="73c1f89c954b2acee10bb93e31a85e94d9b8e8af431d1351c2dc54d69535c33b" exitCode=0
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230837 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"73c1f89c954b2acee10bb93e31a85e94d9b8e8af431d1351c2dc54d69535c33b"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230872 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"11a8d53f71ff42a78498a1a31c9506df5daa4a92e8e53484041c2b8e4b2c4f6a"}
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.230958 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.231942 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.232000 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.232013 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:34 crc kubenswrapper[4716]: W1209 15:08:34.236208 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:34 crc kubenswrapper[4716]: E1209 15:08:34.236294 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:34 crc kubenswrapper[4716]: W1209 15:08:34.320845 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:34 crc kubenswrapper[4716]: E1209 15:08:34.320960 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:34 crc kubenswrapper[4716]: E1209 15:08:34.535377 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="1.6s"
Dec 09 15:08:34 crc kubenswrapper[4716]: W1209 15:08:34.576347 4716 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:34 crc kubenswrapper[4716]: E1209 15:08:34.576479 4716 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.784190 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.785646 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.785685 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.785697 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:34 crc kubenswrapper[4716]: I1209 15:08:34.785729 4716 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 09 15:08:34 crc kubenswrapper[4716]: E1209 15:08:34.786175 4716 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.66:6443: connect: connection refused" node="crc"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.125102 4716 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.66:6443: connect: connection refused
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.173308 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Dec 09 15:08:35 crc kubenswrapper[4716]: E1209 15:08:35.174817 4716 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.66:6443: connect: connection refused" logger="UnhandledError"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.235544 4716 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b62285bb69b8af9786731a0ec7b5a2bf83adb8fc922f35db071731ab1f23ab18" exitCode=0
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.235655 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b62285bb69b8af9786731a0ec7b5a2bf83adb8fc922f35db071731ab1f23ab18"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.235882 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.236880 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.236917 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.236930 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.237997 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"5a495c11fb4691d37c344f26e79e7f3e65eba2b6869d139b2477d81c79013a8f"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.238102 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.238889 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.238906 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.238914 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.240428 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.240462 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.242978 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.243073 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.245068 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db"}
Dec 09 15:08:35 crc kubenswrapper[4716]: I1209 15:08:35.245097 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.251315 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.251390 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.252306 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.252344 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.252356 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.257445 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.257499 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.257508 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.257610 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.258422 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.258451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.258458 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.260889 4716 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d4cba2091f0b3d2bfce763b41a854b307ff86a2abf3ebf56dcbadce37ee149f5" exitCode=0
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.260932 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d4cba2091f0b3d2bfce763b41a854b307ff86a2abf3ebf56dcbadce37ee149f5"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.261003 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.266221 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.266255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.266264 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.269016 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55"}
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.269064 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.269770 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.269794 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.269803 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.386792 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.387934 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.387970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.387980 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:36 crc kubenswrapper[4716]: I1209 15:08:36.388006 4716 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277410 4716 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277466 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277490 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277476 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0ac6946b598c9186358ce80d7dfe09dddc7be095a9037265ef4d78a5d9560e1f"}
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277510 4716 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277725 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277580 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8396257b714afac16cd906de4f119eb5875484a204f3071303336dfc00106118"}
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277834 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"db112dc4ac916254c8ca33290be6ad4d91b85d7c9e17872e67916df15ae25428"}
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277877 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b730df9d371dd6777ff1ba46c584e9cf3b1a99d65d0a1880413036962d7db099"}
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277892 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a355c46f0a4ee9ca369b17ec4b24b554d0eae49d84a5ce7ce318a92e37d46cd0"}
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.277880 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.278924 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.278967 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.278981 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.279006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.279047 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.279070 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.279264 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.279311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.279330 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.281029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.281054 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.281069 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:37 crc kubenswrapper[4716]: I1209 15:08:37.388261 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.280193 4716 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.280276 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.280271 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.281351 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.281388 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.281407 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.282286 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.282310 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.282321 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.443713 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Dec 09 15:08:38 crc kubenswrapper[4716]: I1209 15:08:38.973585 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.006939 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.283345 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.283387 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.285056 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.285095 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.285105 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.285410 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.285466 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.285483 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:39 crc kubenswrapper[4716]: I1209 15:08:39.494146 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.286155 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.287285 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.287338 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.287356 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.332269 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.332513 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.334013 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.334050 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.334062 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:40 crc kubenswrapper[4716]: I1209 15:08:40.360777 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:41 crc kubenswrapper[4716]: I1209 15:08:41.288377 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:41 crc kubenswrapper[4716]: I1209 15:08:41.289267 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:41 crc kubenswrapper[4716]: I1209 15:08:41.289310 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:41 crc kubenswrapper[4716]: I1209 15:08:41.289323 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:41 crc kubenswrapper[4716]: I1209 15:08:41.763524 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.087998 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.088201 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.089331 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.089363 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.089370 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.158455 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.158713 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.159849 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.159888 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.159900 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.290466 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.291665 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.291712 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:42 crc kubenswrapper[4716]: I1209 15:08:42.291724 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:43 crc kubenswrapper[4716]: E1209 15:08:43.281860 4716 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 09 15:08:43 crc kubenswrapper[4716]: I1209 15:08:43.361294 4716 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 09 15:08:43 crc kubenswrapper[4716]: I1209 15:08:43.361414 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.071431 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.071654 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.073053 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.073112 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.073125 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.077046 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.295240 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.296087 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.296120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.296132 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:44 crc kubenswrapper[4716]: I1209 15:08:44.300074 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.297026 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.298026 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.298130 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.298222 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.646240 4716 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.646304 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.665961 4716 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 09 15:08:45 crc kubenswrapper[4716]: I1209 15:08:45.666030 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 09 15:08:47 crc kubenswrapper[4716]: I1209 15:08:47.322575 4716 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 09 15:08:47 crc kubenswrapper[4716]: I1209 15:08:47.323035 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.487833 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.488056 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.489608 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.489684 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.489703 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.501872 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.974860 4716 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 09 15:08:48 crc kubenswrapper[4716]: I1209 15:08:48.974945 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.015407 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.015614 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.016146 4716 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.016224 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.017001 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.017053 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.017068 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.019896 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.305133 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.305147 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.305581 4716 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.305740 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.306590 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.306652 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.306666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.306679 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.306686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:49 crc kubenswrapper[4716]: I1209 15:08:49.306689 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:50 crc kubenswrapper[4716]: E1209 15:08:50.632849 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s"
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.636385 4716 trace.go:236] Trace[1880733052]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 15:08:36.395) (total time: 14240ms):
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[1880733052]: ---"Objects listed" error:<nil> 14240ms (15:08:50.636)
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[1880733052]: [14.24044492s] [14.24044492s] END
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.636428 4716 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.636905 4716 trace.go:236] Trace[229240709]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 15:08:37.598) (total time: 13038ms):
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[229240709]: ---"Objects listed" error:<nil> 13038ms (15:08:50.636)
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[229240709]: [13.038635856s] [13.038635856s] END
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.636957 4716 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.637395 4716 trace.go:236] Trace[374291062]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 15:08:35.877) (total time: 14759ms):
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[374291062]: ---"Objects listed" error:<nil> 14759ms (15:08:50.637)
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[374291062]: [14.759810579s] [14.759810579s] END
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.637645 4716 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Dec 09 15:08:50 crc kubenswrapper[4716]: E1209 15:08:50.637449 4716 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.638004 4716 trace.go:236] Trace[772713892]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (09-Dec-2025 15:08:35.936) (total time: 14701ms):
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[772713892]: ---"Objects listed" error:<nil> 14701ms (15:08:50.637)
Dec 09 15:08:50 crc kubenswrapper[4716]: Trace[772713892]: [14.701155747s] [14.701155747s] END
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.638455 4716 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.660984 4716 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.685125 4716 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.722301 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:50 crc kubenswrapper[4716]: I1209 15:08:50.726079 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.127258 4716 apiserver.go:52] "Watching apiserver"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.132082 4716 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.132650 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.133126 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.133265 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.133665 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.133741 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.133662 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.134494 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.134693 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.134949 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.135011 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.135387 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.135388 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.135732 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.136633 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.136801 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.136863 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.137139 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.138007 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.139727 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.167244 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.188400 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.188463 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.188507 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.190365 4716 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.192237 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.197441 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.208469 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.233069 4716 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.233671 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.246428 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.257645 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.267878 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.278778 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289133 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289181 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289200 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289217 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289235 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289253 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289270 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289284 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289303 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289317 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289332 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289355 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289370 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289395 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289477 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289494 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289535 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289567 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289594 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289644 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289671 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289692 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289712 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289750 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289768 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289762 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289802 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289840 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289876 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289906 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289929 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.289980 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290017 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290020 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290035 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290059 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290082 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290108 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290134 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.290162 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:08:51.790135501 +0000 UTC m=+18.944879489 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290204 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290239 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290267 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290297 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290321 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290332 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290345 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290371 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290397 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290398 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290418 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290423 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290441 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290495 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290530 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290559 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290533 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290590 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290599 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290584 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290721 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290753 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290756 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290794 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290804 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290813 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290852 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290877 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290903 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290931 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290955 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290970 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.290979 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291007 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291032 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291053 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291076 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291099 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291122 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291144 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291156 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291159 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291169 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291208 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291219 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291224 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291241 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291266 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291287 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291307 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291325 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291341 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291363 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291382 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291399 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291399 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291406 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291424 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291416 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291459 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291471 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291476 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291499 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291514 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291536 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291555 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291572 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291590 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291605 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291638 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291654 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291654 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291666 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291674 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291647 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291716 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291698 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291743 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291849 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291896 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291931 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291963 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291998 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292035 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292068 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292097 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292127 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292153 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292187 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292218 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292245 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292271 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292301 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292328 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292355 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292386 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292411 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292449 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292478 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292505 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292530 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292547 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292563 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292580 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292650 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292724 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292745 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292764 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292784 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292803 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292824 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292849 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292875 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292899 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292929 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292951 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292971 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292990 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293008 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293028 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293047 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293063 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293088 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293108 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293126 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293147 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293174 4716 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293197 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293214 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293239 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293317 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293342 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293368 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293395 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293419 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293445 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293470 4716 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293495 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293519 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293544 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293574 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293599 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294260 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294304 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294331 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294359 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294387 4716 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294413 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294441 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294468 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294495 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294555 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294583 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294613 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294665 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294691 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294715 4716 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294740 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294764 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294789 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294816 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294841 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294866 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294894 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294916 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294942 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294966 4716 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294995 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295027 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295054 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295079 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295111 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295148 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295182 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295210 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295236 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295261 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295285 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295319 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295349 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295374 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295401 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295429 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295513 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295540 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295573 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295608 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295651 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295676 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295713 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295738 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295757 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295780 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295834 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod 
\"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295867 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295989 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296008 4716 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296024 4716 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296039 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296053 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296075 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296090 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296103 4716 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296117 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296133 4716 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296143 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath 
\"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296156 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296171 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296186 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296200 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296220 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296234 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296250 4716 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296262 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296277 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296295 4716 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296310 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296326 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296339 4716 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296355 4716 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296369 4716 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296383 4716 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291763 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291845 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291884 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291940 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.291966 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292034 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296741 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292183 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292251 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292278 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292418 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292443 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292570 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292595 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). 
InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292654 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.292914 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293021 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293351 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293777 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.293933 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294411 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294563 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294595 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294595 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294646 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.294996 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295067 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295125 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295282 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295398 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295405 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295554 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295573 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295769 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.295798 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296056 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296954 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297250 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296901 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297511 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297559 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297551 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297679 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297788 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.297794 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298026 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298038 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298050 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298220 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298355 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298383 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298445 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298734 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298829 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298859 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298927 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.298529 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.299687 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.299975 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.300105 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.300163 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.300178 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.300230 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.301141 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.301241 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.301254 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.300947 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.302040 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.302446 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.302773 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.302975 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296374 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296344 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). 
InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296666 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.296747 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.303709 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.303781 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.303916 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.303928 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304077 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304219 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304184 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304343 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304342 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304696 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304857 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.304996 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.305058 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.305137 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:51.805117008 +0000 UTC m=+18.959860996 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.305226 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.305244 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.305851 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.305604 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.305895 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.305832 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.306013 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.306332 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.306395 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.306568 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.306663 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:51.806649182 +0000 UTC m=+18.961393370 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.306663 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.306707 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.306806 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.307303 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.307319 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.307437 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.308015 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.308015 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.308398 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.308443 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.308612 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). 
InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.309800 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.310563 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.310803 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.311012 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.311099 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.311185 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.311211 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.308129 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.311705 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.311853 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.312037 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.312094 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.310869 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.312612 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.313049 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.313345 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.313438 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.320928 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.321374 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.321865 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.321911 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.321974 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.322055 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.322475 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.322716 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.322841 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.323288 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.323348 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.323504 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.323584 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.323691 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.323724 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.323786 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.323585 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.323903 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:51.823845032 +0000 UTC m=+18.978589010 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324022 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324261 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324130 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324487 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.324639 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324651 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.324665 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.324685 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324724 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.324760 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:51.824734077 +0000 UTC m=+18.979478255 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324859 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324962 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325023 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325034 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325131 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325149 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325550 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325888 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325891 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325929 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.325984 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.326278 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.324342 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.327656 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.328181 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.328928 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.329442 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.329443 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.329524 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.329724 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.330660 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.330534 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.332385 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.332470 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.333465 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4" exitCode=255 Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.333597 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4"} Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.334148 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.337896 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.339544 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.340198 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.341867 4716 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.347217 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.348127 4716 scope.go:117] "RemoveContainer" containerID="ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.357724 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.359049 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.364321 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.369574 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.369947 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.381576 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.387548 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397576 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397731 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397745 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397755 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397765 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397777 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397786 4716 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397816 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397824 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath 
\"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397832 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397841 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397851 4716 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397862 4716 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397894 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397904 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397912 4716 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397921 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397932 4716 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397943 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397971 4716 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397981 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397990 4716 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 
09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.397999 4716 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398008 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398018 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398046 4716 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398058 4716 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398067 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398076 4716 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398084 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398093 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398102 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398130 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398140 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398149 4716 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc 
kubenswrapper[4716]: I1209 15:08:51.398159 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398167 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398177 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398186 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398219 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398211 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398230 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398319 4716 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398349 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398361 4716 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398371 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398383 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398394 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: 
\"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398421 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398432 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398442 4716 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398452 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398462 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398472 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398497 4716 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398507 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398517 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398527 4716 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398536 4716 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398545 4716 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398553 4716 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398577 4716 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398586 4716 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398596 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398613 4716 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398641 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398651 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398662 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398672 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398685 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398694 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398720 4716 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398730 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398742 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" 
(UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398752 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398762 4716 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398772 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398798 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398808 4716 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398819 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398829 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398839 4716 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398848 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398872 4716 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398883 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398892 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398902 4716 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node 
\"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398913 4716 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398923 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398947 4716 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398957 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398967 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398978 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398988 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.398997 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399008 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399033 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399043 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399052 4716 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399061 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc 
kubenswrapper[4716]: I1209 15:08:51.399072 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399082 4716 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399107 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399119 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399128 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399140 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399150 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399160 4716 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399183 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399193 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399202 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399212 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399221 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc 
kubenswrapper[4716]: I1209 15:08:51.399231 4716 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399240 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399264 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399275 4716 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399285 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399295 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399305 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399348 4716 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399373 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399384 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399396 4716 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399406 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399449 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399459 4716 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399476 4716 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399558 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399605 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399640 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399651 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399661 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399673 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399690 4716 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399720 4716 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399731 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399740 4716 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399752 4716 reconciler_common.go:293] "Volume 
detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399762 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399773 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399798 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399808 4716 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399817 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399827 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399837 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399847 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399873 4716 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399883 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399893 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399902 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 
15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399911 4716 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399923 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399933 4716 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399958 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399967 4716 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399977 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399988 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.399998 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400007 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400017 4716 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400028 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400053 4716 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400063 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400072 4716 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400081 4716 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400090 4716 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400100 4716 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400112 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.400122 4716 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.409805 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.425137 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.438064 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.456267 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.464594 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 09 15:08:51 crc kubenswrapper[4716]: W1209 15:08:51.468116 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-d927561d3ed8c9c8610f52dc8704f23a379944503d6905a12244fcae9b2d107f WatchSource:0}: Error finding container d927561d3ed8c9c8610f52dc8704f23a379944503d6905a12244fcae9b2d107f: Status 404 returned error can't find the container with id d927561d3ed8c9c8610f52dc8704f23a379944503d6905a12244fcae9b2d107f Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.473386 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 09 15:08:51 crc kubenswrapper[4716]: W1209 15:08:51.487777 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-5bb75752ba5ad5195d0fff3e87e37fc5758133c537c59ba2c2104661707af36b WatchSource:0}: Error finding container 5bb75752ba5ad5195d0fff3e87e37fc5758133c537c59ba2c2104661707af36b: Status 404 returned error can't find the container with id 5bb75752ba5ad5195d0fff3e87e37fc5758133c537c59ba2c2104661707af36b Dec 09 15:08:51 crc kubenswrapper[4716]: W1209 15:08:51.488479 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-ec733942b61e8dc3148935ccc8cfdad5c99fec1cb7363a5f4b3b7e7622d37935 WatchSource:0}: Error finding container ec733942b61e8dc3148935ccc8cfdad5c99fec1cb7363a5f4b3b7e7622d37935: Status 404 returned error can't find the container with id ec733942b61e8dc3148935ccc8cfdad5c99fec1cb7363a5f4b3b7e7622d37935 Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.803497 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.803750 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:08:52.803719627 +0000 UTC m=+19.958463615 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.906087 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.906279 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.906568 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:52.906540628 +0000 UTC m=+20.061284616 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.906446 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.906731 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:51 crc kubenswrapper[4716]: I1209 15:08:51.906787 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.906754 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.906979 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.907069 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.907190 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:52.907173466 +0000 UTC m=+20.061917454 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.906938 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.907363 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.907427 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.907512 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:52.907502785 +0000 UTC m=+20.062246773 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.906936 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:51 crc kubenswrapper[4716]: E1209 15:08:51.907650 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:52.907635729 +0000 UTC m=+20.062379777 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.212535 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.212690 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.338876 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.340984 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.341218 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.342511 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ec733942b61e8dc3148935ccc8cfdad5c99fec1cb7363a5f4b3b7e7622d37935"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.344761 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.344803 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.344822 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5bb75752ba5ad5195d0fff3e87e37fc5758133c537c59ba2c2104661707af36b"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.346706 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.346748 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"d927561d3ed8c9c8610f52dc8704f23a379944503d6905a12244fcae9b2d107f"} Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.360675 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.376832 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.390888 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.407119 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.424865 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"starte
d\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.439493 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.458591 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.475154 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.489566 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.508684 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.522913 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.537549 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.553864 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.569465 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.584230 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.601008 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:3
5Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 
15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:52Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.816130 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.816331 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-12-09 15:08:54.816309484 +0000 UTC m=+21.971053472 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.917237 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.917281 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.917303 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:52 crc kubenswrapper[4716]: I1209 15:08:52.917324 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917461 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917519 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917533 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:54.917510428 +0000 UTC m=+22.072254416 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917890 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:54.917868648 +0000 UTC m=+22.072612746 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917844 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917948 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.917970 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.918031 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:54.918020792 +0000 UTC m=+22.072764770 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.918293 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.918391 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.918455 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:52 crc kubenswrapper[4716]: E1209 15:08:52.918680 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:54.91865846 +0000 UTC m=+22.073402448 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.212919 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.213142 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.213573 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.213910 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.218241 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.219592 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.222216 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.224272 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.226519 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.228083 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.229445 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.230804 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.232144 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.233442 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.233453 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.234697 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.236562 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.237950 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.238759 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.239546 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.240348 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.241249 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.241843 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.242576 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" 
path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.243421 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.244092 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.244898 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.245531 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.246586 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.247217 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.249182 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.250388 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.251081 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.251967 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.252748 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.253294 4716 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.253415 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.253989 4716 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.254854 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.255414 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.255853 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.257314 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.258151 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 09 15:08:53 crc 
kubenswrapper[4716]: I1209 15:08:53.258758 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.259393 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.260134 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.260662 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.261265 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.263211 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.264346 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.265918 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.266860 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.268342 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.269799 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.271339 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.271443 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.272350 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.273924 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.275137 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.276121 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.277655 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" 
path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.293847 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.313793 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.329908 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.344880 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.361714 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.838592 4716 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.840511 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.840544 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.840556 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.840613 4716 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.848867 4716 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.849291 4716 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.850792 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.850937 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.851039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.851149 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.851244 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:53Z","lastTransitionTime":"2025-12-09T15:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.869539 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.874281 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.874503 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.874571 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.874691 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.874756 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:53Z","lastTransitionTime":"2025-12-09T15:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.889992 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.893815 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.894146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.894275 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.894372 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.894444 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:53Z","lastTransitionTime":"2025-12-09T15:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.909253 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.914219 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.914279 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.914295 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.914333 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.914349 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:53Z","lastTransitionTime":"2025-12-09T15:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.926723 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.930606 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.930659 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.930675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.930692 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.930703 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:53Z","lastTransitionTime":"2025-12-09T15:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.947246 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:53 crc kubenswrapper[4716]: E1209 15:08:53.947455 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.949716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.949775 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.949786 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.949806 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:53 crc kubenswrapper[4716]: I1209 15:08:53.949819 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:53Z","lastTransitionTime":"2025-12-09T15:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.052644 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.052697 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.052709 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.052725 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.052734 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.156471 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.156521 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.156531 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.156550 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.156567 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.212818 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.212997 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.259000 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.259037 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.259046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.259060 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.259069 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.354012 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.361467 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.361808 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.361907 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.362011 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.362160 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.381833 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.398254 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.413122 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.426161 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.439924 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshif
t-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.456637 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.464999 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.465071 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.465082 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.465100 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.465112 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.473759 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.486268 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:54Z is after 2025-08-24T17:21:41Z"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.567727 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.567781 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.567794 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.567812 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.567825 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.671066 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.671146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.671165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.671192 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.671215 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.774438 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.774531 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.774557 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.774597 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.774657 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.834900 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.835101 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:08:58.835080433 +0000 UTC m=+25.989824421 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.877223 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.877286 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.877303 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.877329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.877348 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.936220 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.936283 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.936310 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.936336 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936445 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936444 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936499 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936554 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936571 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936510 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:58.936485343 +0000 UTC m=+26.091229331 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936648 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936707 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:58.936662518 +0000 UTC m=+26.091406516 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936715 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936732 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:58.936720639 +0000 UTC m=+26.091464637 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936734 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:08:54 crc kubenswrapper[4716]: E1209 15:08:54.936815 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:08:58.936793941 +0000 UTC m=+26.091537919 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.980221 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.980271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.980283 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.980300 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:54 crc kubenswrapper[4716]: I1209 15:08:54.980312 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:54Z","lastTransitionTime":"2025-12-09T15:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.082239 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.082275 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.082283 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.082297 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.082310 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.185076 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.185160 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.185186 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.185223 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.185253 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.213667 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.213691 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:08:55 crc kubenswrapper[4716]: E1209 15:08:55.213860 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:08:55 crc kubenswrapper[4716]: E1209 15:08:55.214004 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.288432 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.288489 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.288499 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.288517 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.288529 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.390494 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.390534 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.390544 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.390558 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.390568 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.492963 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.493018 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.493028 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.493043 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.493053 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.596155 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.596225 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.596238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.596258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.596271 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.699212 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.699282 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.699298 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.699321 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.699337 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.801459 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.801525 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.801541 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.801576 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.801593 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.835310 4716 csr.go:261] certificate signing request csr-8tn6d is approved, waiting to be issued
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.845748 4716 csr.go:257] certificate signing request csr-8tn6d is issued
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.865604 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-q8f5v"]
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.866163 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-q8f5v"
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.866283 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-8clts"]
Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.866781 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.868464 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.870128 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.871071 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.871676 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.871832 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.871873 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.874304 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.886572 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.905108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.905161 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.905173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.905195 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.905212 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:55Z","lastTransitionTime":"2025-12-09T15:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.907713 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.928234 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.962923 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.992467 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rqz4n"] Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.992821 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-rqz4n" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.993953 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource
-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.995177 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.995718 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 09 15:08:55 crc kubenswrapper[4716]: I1209 15:08:55.996269 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.000490 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.000738 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.008279 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.008349 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.008362 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.008386 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.008401 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.033407 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.047060 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/df5b4a13-8990-44d5-8b66-a3b696c9774f-serviceca\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.047102 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/df5b4a13-8990-44d5-8b66-a3b696c9774f-host\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.047130 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc6th\" (UniqueName: 
\"kubernetes.io/projected/df5b4a13-8990-44d5-8b66-a3b696c9774f-kube-api-access-dc6th\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.047158 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/92bbf960-39e4-4521-9ba9-c66d302ceb3a-hosts-file\") pod \"node-resolver-q8f5v\" (UID: \"92bbf960-39e4-4521-9ba9-c66d302ceb3a\") " pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.047198 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr45r\" (UniqueName: \"kubernetes.io/projected/92bbf960-39e4-4521-9ba9-c66d302ceb3a-kube-api-access-fr45r\") pod \"node-resolver-q8f5v\" (UID: \"92bbf960-39e4-4521-9ba9-c66d302ceb3a\") " pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.067894 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.078248 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-qrqxm"] Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.079023 4716 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.096727 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-rdkb2"] Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.097164 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.098950 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.105744 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.107283 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110051 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110360 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110409 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110637 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110659 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110679 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110690 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110707 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.110728 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.113995 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.135904 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148052 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc6th\" (UniqueName: \"kubernetes.io/projected/df5b4a13-8990-44d5-8b66-a3b696c9774f-kube-api-access-dc6th\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148111 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/92bbf960-39e4-4521-9ba9-c66d302ceb3a-hosts-file\") pod \"node-resolver-q8f5v\" (UID: \"92bbf960-39e4-4521-9ba9-c66d302ceb3a\") " pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148142 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-cni-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148163 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-cnibin\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148184 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-hostroot\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 
15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148215 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plvv8\" (UniqueName: \"kubernetes.io/projected/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-kube-api-access-plvv8\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148254 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-os-release\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148357 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/92bbf960-39e4-4521-9ba9-c66d302ceb3a-hosts-file\") pod \"node-resolver-q8f5v\" (UID: \"92bbf960-39e4-4521-9ba9-c66d302ceb3a\") " pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148399 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-cni-binary-copy\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148430 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-k8s-cni-cncf-io\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148457 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr45r\" (UniqueName: \"kubernetes.io/projected/92bbf960-39e4-4521-9ba9-c66d302ceb3a-kube-api-access-fr45r\") pod \"node-resolver-q8f5v\" (UID: \"92bbf960-39e4-4521-9ba9-c66d302ceb3a\") " pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148502 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-socket-dir-parent\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148543 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-kubelet\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148575 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-daemon-config\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 
15:08:56.148609 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-etc-kubernetes\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148676 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-system-cni-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148741 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-netns\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148776 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-cni-multus\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148907 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/df5b4a13-8990-44d5-8b66-a3b696c9774f-serviceca\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.148987 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-conf-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.149016 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-multus-certs\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.149068 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/df5b4a13-8990-44d5-8b66-a3b696c9774f-host\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.149095 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-cni-bin\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.149227 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" 
(UniqueName: \"kubernetes.io/host-path/df5b4a13-8990-44d5-8b66-a3b696c9774f-host\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.150358 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/df5b4a13-8990-44d5-8b66-a3b696c9774f-serviceca\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.153962 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.165802 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.168273 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr45r\" (UniqueName: \"kubernetes.io/projected/92bbf960-39e4-4521-9ba9-c66d302ceb3a-kube-api-access-fr45r\") pod \"node-resolver-q8f5v\" (UID: \"92bbf960-39e4-4521-9ba9-c66d302ceb3a\") " pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.170063 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc6th\" (UniqueName: \"kubernetes.io/projected/df5b4a13-8990-44d5-8b66-a3b696c9774f-kube-api-access-dc6th\") pod \"node-ca-8clts\" (UID: \"df5b4a13-8990-44d5-8b66-a3b696c9774f\") " pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.179038 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.181010 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-q8f5v" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.187292 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-8clts" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.196838 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: W1209 15:08:56.197066 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92bbf960_39e4_4521_9ba9_c66d302ceb3a.slice/crio-2b85ded7000cc38a0a9259c1cb6e94b5b241272181639920b8191cb6cae6aed5 WatchSource:0}: Error finding container 2b85ded7000cc38a0a9259c1cb6e94b5b241272181639920b8191cb6cae6aed5: Status 404 returned error can't find the container with id 2b85ded7000cc38a0a9259c1cb6e94b5b241272181639920b8191cb6cae6aed5 Dec 09 15:08:56 crc kubenswrapper[4716]: W1209 15:08:56.201203 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf5b4a13_8990_44d5_8b66_a3b696c9774f.slice/crio-0b6b05bb17d08f8d7a2df8fbf763ed8cd5063e905e1955bf606b5308805958e7 WatchSource:0}: Error finding container 0b6b05bb17d08f8d7a2df8fbf763ed8cd5063e905e1955bf606b5308805958e7: Status 404 returned error can't find the container with id 0b6b05bb17d08f8d7a2df8fbf763ed8cd5063e905e1955bf606b5308805958e7 Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.211753 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.212531 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:56 crc kubenswrapper[4716]: E1209 15:08:56.212689 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.213742 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.213842 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.213900 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.213970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.214035 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.229595 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.247830 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.249898 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-cni-multus\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.249936 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjm6w\" (UniqueName: \"kubernetes.io/projected/58f66b28-96d7-4c4f-b567-7f0917812e0b-kube-api-access-bjm6w\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.249968 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-cni-bin\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250001 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-cnibin\") 
pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250028 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-cni-binary-copy\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250059 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/58f66b28-96d7-4c4f-b567-7f0917812e0b-cni-binary-copy\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250072 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-cni-bin\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250079 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km64r\" (UniqueName: \"kubernetes.io/projected/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-kube-api-access-km64r\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250135 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-socket-dir-parent\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250159 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-kubelet\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250184 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-proxy-tls\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250198 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-cnibin\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250255 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-system-cni-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " 
pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250208 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-system-cni-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250297 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-netns\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250312 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-kubelet\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250331 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-conf-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250359 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-multus-certs\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250376 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-socket-dir-parent\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250382 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-rootfs\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250414 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-conf-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250017 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-var-lib-cni-multus\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250450 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-cni-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250479 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-hostroot\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250379 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-netns\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250512 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-hostroot\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250419 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-multus-certs\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250506 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plvv8\" (UniqueName: \"kubernetes.io/projected/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-kube-api-access-plvv8\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250686 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-system-cni-dir\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250707 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-cni-binary-copy\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250740 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/58f66b28-96d7-4c4f-b567-7f0917812e0b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250791 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-os-release\") pod \"multus-rqz4n\" (UID: 
\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250826 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-k8s-cni-cncf-io\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250855 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250877 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-daemon-config\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250887 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-os-release\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250895 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-etc-kubernetes\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250914 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-host-run-k8s-cni-cncf-io\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250920 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-etc-kubernetes\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250958 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-cnibin\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250985 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-os-release\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 
09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.251009 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-mcd-auth-proxy-config\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.251504 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-daemon-config\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.250778 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-multus-cni-dir\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.281885 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.282449 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plvv8\" (UniqueName: \"kubernetes.io/projected/38b4e174-ba72-4a0f-9eed-f2ce970c0afc-kube-api-access-plvv8\") pod \"multus-rqz4n\" (UID: \"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\") " pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.294805 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.305133 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rqz4n" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.309806 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: W1209 15:08:56.318423 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38b4e174_ba72_4a0f_9eed_f2ce970c0afc.slice/crio-19cb320d15378950b7f392677ee1b0a3a0c901ff344cae206f1305e6f82f5d08 WatchSource:0}: Error finding container 19cb320d15378950b7f392677ee1b0a3a0c901ff344cae206f1305e6f82f5d08: Status 404 returned error can't find 
the container with id 19cb320d15378950b7f392677ee1b0a3a0c901ff344cae206f1305e6f82f5d08 Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.321530 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.321566 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.321577 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.321595 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.321608 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.334853 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.347155 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351716 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-cnibin\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351746 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-os-release\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351766 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-mcd-auth-proxy-config\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351784 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjm6w\" (UniqueName: \"kubernetes.io/projected/58f66b28-96d7-4c4f-b567-7f0917812e0b-kube-api-access-bjm6w\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351819 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/58f66b28-96d7-4c4f-b567-7f0917812e0b-cni-binary-copy\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351832 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-cnibin\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351837 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km64r\" (UniqueName: \"kubernetes.io/projected/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-kube-api-access-km64r\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351893 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-proxy-tls\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351927 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-rootfs\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351948 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-system-cni-dir\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351967 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/58f66b28-96d7-4c4f-b567-7f0917812e0b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: 
I1209 15:08:56.351972 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-os-release\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.351985 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.352160 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-rootfs\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.352208 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-system-cni-dir\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.352418 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/58f66b28-96d7-4c4f-b567-7f0917812e0b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.352863 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-mcd-auth-proxy-config\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.353014 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/58f66b28-96d7-4c4f-b567-7f0917812e0b-cni-binary-copy\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.353125 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/58f66b28-96d7-4c4f-b567-7f0917812e0b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.357214 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-proxy-tls\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " 
pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.367535 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.370962 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjm6w\" (UniqueName: \"kubernetes.io/projected/58f66b28-96d7-4c4f-b567-7f0917812e0b-kube-api-access-bjm6w\") pod \"multus-additional-cni-plugins-qrqxm\" (UID: \"58f66b28-96d7-4c4f-b567-7f0917812e0b\") " pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.374562 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerStarted","Data":"19cb320d15378950b7f392677ee1b0a3a0c901ff344cae206f1305e6f82f5d08"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.376351 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-q8f5v" 
event={"ID":"92bbf960-39e4-4521-9ba9-c66d302ceb3a","Type":"ContainerStarted","Data":"2b85ded7000cc38a0a9259c1cb6e94b5b241272181639920b8191cb6cae6aed5"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.377478 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8clts" event={"ID":"df5b4a13-8990-44d5-8b66-a3b696c9774f","Type":"ContainerStarted","Data":"0b6b05bb17d08f8d7a2df8fbf763ed8cd5063e905e1955bf606b5308805958e7"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.378408 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km64r\" (UniqueName: \"kubernetes.io/projected/d92cd91c-19c2-4865-a522-6d1e3a4cd6a5-kube-api-access-km64r\") pod \"machine-config-daemon-rdkb2\" (UID: \"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\") " pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.391861 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.409417 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:08:56 crc kubenswrapper[4716]: W1209 15:08:56.410801 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58f66b28_96d7_4c4f_b567_7f0917812e0b.slice/crio-9da72ebfd4a68878064c3ec53a6cf50f02ae6fa0edd828796b6a4be1daecd77d WatchSource:0}: Error finding container 9da72ebfd4a68878064c3ec53a6cf50f02ae6fa0edd828796b6a4be1daecd77d: Status 404 returned error can't find the container with id 9da72ebfd4a68878064c3ec53a6cf50f02ae6fa0edd828796b6a4be1daecd77d Dec 09 15:08:56 crc kubenswrapper[4716]: W1209 15:08:56.427558 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd92cd91c_19c2_4865_a522_6d1e3a4cd6a5.slice/crio-54daa332cc168c6ec4a3ec4811b25773040dd58f643005bed2aea65fe4d60d4f WatchSource:0}: Error finding container 54daa332cc168c6ec4a3ec4811b25773040dd58f643005bed2aea65fe4d60d4f: Status 404 returned error can't find the container with id 54daa332cc168c6ec4a3ec4811b25773040dd58f643005bed2aea65fe4d60d4f Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.428088 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.428122 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.428133 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.428155 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.428167 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.531435 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.531742 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.531847 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.531927 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.531994 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.634208 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.634256 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.634270 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.634290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.634303 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.736727 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.736777 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.736788 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.736803 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.736814 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.839086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.839119 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.839129 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.839144 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.839154 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.847488 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-12-09 15:03:55 +0000 UTC, rotation deadline is 2026-10-16 02:11:53.688139387 +0000 UTC Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.847604 4716 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7451h2m56.840543402s for next certificate rotation Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.894895 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hcdn4"] Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.895944 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.898740 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.899383 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.899713 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.899777 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.899858 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.901701 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.902378 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.923694 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.942205 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.942265 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.942281 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.942308 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.942322 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:56Z","lastTransitionTime":"2025-12-09T15:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:56 crc kubenswrapper[4716]: I1209 15:08:56.976132 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:56Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.020193 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.046319 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.046598 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.046695 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.046765 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.046828 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.048927 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.061599 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-var-lib-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.061893 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovn-node-metrics-cert\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062027 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54c5g\" (UniqueName: \"kubernetes.io/projected/3d953045-e94a-4e04-b78e-bc20b3a8c36c-kube-api-access-54c5g\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062190 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-ovn\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062255 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-netns\") pod \"ovnkube-node-hcdn4\" 
(UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062352 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-bin\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062400 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-systemd\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062424 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-systemd-units\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062466 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-slash\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062487 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-netd\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062506 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-script-lib\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062535 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-etc-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062553 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-kubelet\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062571 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-ovn-kubernetes\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062795 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062886 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-log-socket\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.062918 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-config\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.063152 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-env-overrides\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.063323 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-node-log\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.063430 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.075159 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.109467 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.135104 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.153108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.153144 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.153153 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.153169 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.153179 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164361 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-env-overrides\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164434 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-node-log\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164475 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164511 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-var-lib-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164530 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovn-node-metrics-cert\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164546 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54c5g\" (UniqueName: \"kubernetes.io/projected/3d953045-e94a-4e04-b78e-bc20b3a8c36c-kube-api-access-54c5g\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164565 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-ovn\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164583 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-netns\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164611 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-systemd\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 
15:08:57.164645 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-bin\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164689 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-systemd-units\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164683 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-var-lib-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164674 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164756 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-slash\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164756 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-ovn\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164796 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-systemd-units\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164802 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-bin\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164689 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164766 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-systemd\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164836 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-netns\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164712 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-slash\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164880 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-netd\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164908 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-script-lib\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164943 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-etc-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164972 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-kubelet\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.164993 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-ovn-kubernetes\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165042 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165070 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-log-socket\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165091 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-config\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165114 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-etc-openvswitch\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165178 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-kubelet\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165215 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-ovn-kubernetes\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165244 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165271 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-log-socket\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165303 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-netd\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165521 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-env-overrides\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165914 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-config\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.165990 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-node-log\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.166120 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-script-lib\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.170010 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovn-node-metrics-cert\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.185278 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.188064 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54c5g\" (UniqueName: \"kubernetes.io/projected/3d953045-e94a-4e04-b78e-bc20b3a8c36c-kube-api-access-54c5g\") pod \"ovnkube-node-hcdn4\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.202605 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.207233 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.213506 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.213557 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:57 crc kubenswrapper[4716]: E1209 15:08:57.213658 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:08:57 crc kubenswrapper[4716]: E1209 15:08:57.213746 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.217778 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: W1209 15:08:57.225508 4716 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d953045_e94a_4e04_b78e_bc20b3a8c36c.slice/crio-9094de3f2f9c5f9524e307d7f0fe1431a15578d89a0d93698a0754d687fcbdb4 WatchSource:0}: Error finding container 9094de3f2f9c5f9524e307d7f0fe1431a15578d89a0d93698a0754d687fcbdb4: Status 404 returned error can't find the container with id 9094de3f2f9c5f9524e307d7f0fe1431a15578d89a0d93698a0754d687fcbdb4 Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.230890 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.250075 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.256651 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.256718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.256736 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.256759 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.256775 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.263737 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.359024 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.359072 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.359081 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.359103 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.359112 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.382999 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.383044 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.383056 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"54daa332cc168c6ec4a3ec4811b25773040dd58f643005bed2aea65fe4d60d4f"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.384562 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8clts" event={"ID":"df5b4a13-8990-44d5-8b66-a3b696c9774f","Type":"ContainerStarted","Data":"11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.386329 4716 generic.go:334] "Generic (PLEG): container finished" podID="58f66b28-96d7-4c4f-b567-7f0917812e0b" containerID="f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b" exitCode=0 Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.386427 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerDied","Data":"f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.386480 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerStarted","Data":"9da72ebfd4a68878064c3ec53a6cf50f02ae6fa0edd828796b6a4be1daecd77d"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.388929 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerStarted","Data":"b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.390272 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-q8f5v" event={"ID":"92bbf960-39e4-4521-9ba9-c66d302ceb3a","Type":"ContainerStarted","Data":"2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.391643 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"9094de3f2f9c5f9524e307d7f0fe1431a15578d89a0d93698a0754d687fcbdb4"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.406706 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.420888 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.434553 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.447656 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.468637 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.468725 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.468736 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.468752 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.469149 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.486273 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.510529 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.536932 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.564523 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.572014 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.572057 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 
15:08:57.572068 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.572087 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.572099 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.580489 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb
dd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.600439 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.614443 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.627490 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.641822 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.654386 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.667893 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.675067 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.675113 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.675125 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.675145 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.675159 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.687502 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"moun
tPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.699982 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/op
enshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.715506 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",
\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.728653 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.742490 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.758244 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.776313 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.777889 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.777938 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.777951 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.777967 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.777980 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.793134 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.809336 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.824995 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.839903 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.852243 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.874168 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:57Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.882034 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.882261 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 
15:08:57.882323 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.882393 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.882481 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.985236 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.985286 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.985300 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.985321 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:57 crc kubenswrapper[4716]: I1209 15:08:57.985336 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:57Z","lastTransitionTime":"2025-12-09T15:08:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.088862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.088922 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.088940 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.088970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.088988 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.192233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.192291 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.192301 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.192334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.192344 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.213688 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.213886 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.294429 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.294469 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.294479 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.294494 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.294506 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396404 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396489 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396510 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396501 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" exitCode=0
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396524 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.396574 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.398752 4716 generic.go:334] "Generic (PLEG): container finished" podID="58f66b28-96d7-4c4f-b567-7f0917812e0b" containerID="7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4" exitCode=0
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.398863 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerDied","Data":"7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4"}
Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.411552 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status:
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.426770 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.448051 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z 
is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.465570 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.479473 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.492365 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.503932 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.504223 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.504337 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.504416 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.504500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.504585 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.518367 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.531715 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP
\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.548212 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] 
Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.561467 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.575337 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.590339 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.608355 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc 
kubenswrapper[4716]: I1209 15:08:58.611391 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.611425 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.611439 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.611456 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.611468 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.628496 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z 
is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.643177 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.657183 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.670086 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.679817 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.696804 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.710636 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.713254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.713302 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.713316 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.713335 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.713351 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.726409 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.740718 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.753661 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.767856 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.781779 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.793422 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.802731 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:58Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.817163 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.817214 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.817232 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.817254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.817269 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.882124 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.882396 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:09:06.882342879 +0000 UTC m=+34.037086877 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.919747 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.919791 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.919805 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.919825 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.919837 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:58Z","lastTransitionTime":"2025-12-09T15:08:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.983460 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.983525 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.983552 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:08:58 crc kubenswrapper[4716]: I1209 15:08:58.983584 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983764 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983792 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983809 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983812 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983858 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983883 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983937 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983953 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.983886 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:06.983865522 +0000 UTC m=+34.138609510 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.984028 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:06.983985245 +0000 UTC m=+34.138729383 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.984051 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:06.984040947 +0000 UTC m=+34.138785165 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:08:58 crc kubenswrapper[4716]: E1209 15:08:58.984070 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:06.984059858 +0000 UTC m=+34.138804076 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.022036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.022074 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.022084 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.022102 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.022113 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.124718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.124772 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.124786 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.124809 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.124822 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.212970 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.213091 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:08:59 crc kubenswrapper[4716]: E1209 15:08:59.213147 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:08:59 crc kubenswrapper[4716]: E1209 15:08:59.213272 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.227125 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.227163 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.227173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.227188 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.227201 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.329301 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.329348 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.329364 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.329382 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.329393 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.406508 4716 generic.go:334] "Generic (PLEG): container finished" podID="58f66b28-96d7-4c4f-b567-7f0917812e0b" containerID="73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc" exitCode=0 Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.406570 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerDied","Data":"73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.411850 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.411962 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.411976 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.411986 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.411995 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.412022 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.426240 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.432205 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.432248 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.432258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.432276 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.432287 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.437943 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.461235 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.476609 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de25971
26bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.493410 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.508005 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.519352 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.534577 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.535304 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.535334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.535343 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.535357 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.535367 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.547221 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.562501 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.579599 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.595285 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.612442 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.633020 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:08:59Z is after 2025-08-24T17:21:41Z" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.638026 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.638080 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.638090 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.638111 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.638135 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.743547 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.743594 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.743605 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.743640 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.743656 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.846580 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.846663 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.846685 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.846706 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.846720 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.950240 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.950292 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.950306 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.950327 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:08:59 crc kubenswrapper[4716]: I1209 15:08:59.950341 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:08:59Z","lastTransitionTime":"2025-12-09T15:08:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.053787 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.053849 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.053870 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.053900 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.053921 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.157496 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.157553 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.157566 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.157586 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.157599 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.213307 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:00 crc kubenswrapper[4716]: E1209 15:09:00.213471 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.260871 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.260916 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.260925 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.260942 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.260952 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.363799 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.363850 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.363862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.363890 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.363901 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.418746 4716 generic.go:334] "Generic (PLEG): container finished" podID="58f66b28-96d7-4c4f-b567-7f0917812e0b" containerID="2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f" exitCode=0 Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.418798 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerDied","Data":"2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.446436 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.466573 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.466633 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.466643 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.466656 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.466665 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.481518 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.511883 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.528301 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.542781 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"image
ID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.556888 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.570110 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.570160 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.570176 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.570195 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.570214 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.571853 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.583958 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.600729 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.616992 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.635707 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.653033 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.667340 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.673329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.673368 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.673382 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.673399 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.673409 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.681427 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:00Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.776383 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 
15:09:00.776440 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.776451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.776467 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.776479 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.879058 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.879116 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.879128 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.879146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.879159 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.982923 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.982995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.983021 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.983057 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:00 crc kubenswrapper[4716]: I1209 15:09:00.983081 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:00Z","lastTransitionTime":"2025-12-09T15:09:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.085673 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.085722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.085739 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.085762 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.085779 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.188587 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.188652 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.188663 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.188679 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.188690 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.213205 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:01 crc kubenswrapper[4716]: E1209 15:09:01.213323 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.213378 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:01 crc kubenswrapper[4716]: E1209 15:09:01.213523 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.291959 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.292004 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.292017 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.292035 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.292047 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.394228 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.394267 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.394278 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.394292 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.394303 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.424245 4716 generic.go:334] "Generic (PLEG): container finished" podID="58f66b28-96d7-4c4f-b567-7f0917812e0b" containerID="62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3" exitCode=0 Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.424336 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerDied","Data":"62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.428599 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.443026 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.456976 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.472248 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.486055 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.496984 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.497039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.497053 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.497071 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.497082 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.499569 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.512665 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.525154 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.544239 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.557503 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.570931 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17
ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.585335 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.600326 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.600379 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.600446 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.600468 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.600480 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.600990 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.611751 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.625977 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:01Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.702934 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.703290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.703299 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.703313 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.703323 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.806029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.806092 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.806102 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.806119 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.806128 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.908991 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.909045 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.909057 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.909077 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:01 crc kubenswrapper[4716]: I1209 15:09:01.909091 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:01Z","lastTransitionTime":"2025-12-09T15:09:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.012400 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.012441 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.012450 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.012467 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.012477 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.115318 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.115372 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.115384 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.115403 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.115417 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.213505 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:02 crc kubenswrapper[4716]: E1209 15:09:02.213683 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.218460 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.218487 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.218495 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.218507 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.218522 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.322446 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.322491 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.322504 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.322524 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.322538 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.425070 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.425127 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.425142 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.425164 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.425177 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.434437 4716 generic.go:334] "Generic (PLEG): container finished" podID="58f66b28-96d7-4c4f-b567-7f0917812e0b" containerID="748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576" exitCode=0 Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.434495 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerDied","Data":"748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.454180 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.468263 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.485698 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.498105 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.514975 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.528342 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.528821 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.528862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.528912 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.528930 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.528943 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.543793 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.557914 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.569359 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.581935 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.596960 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.609564 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could 
not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.621589 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.632271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.632310 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.632323 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.632342 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.632354 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.639569 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:02Z 
is after 2025-08-24T17:21:41Z" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.734911 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.734939 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.734948 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.734961 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.734972 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.837742 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.837782 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.837793 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.837810 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.837821 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.940040 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.940076 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.940086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.940100 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:02 crc kubenswrapper[4716]: I1209 15:09:02.940110 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:02Z","lastTransitionTime":"2025-12-09T15:09:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.040012 4716 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.044070 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.044197 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.044264 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.044332 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.044395 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.146238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.146272 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.146281 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.146294 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.146304 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.214120 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:03 crc kubenswrapper[4716]: E1209 15:09:03.214275 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.214797 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:03 crc kubenswrapper[4716]: E1209 15:09:03.214895 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.233649 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.246798 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.248489 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.248609 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.248728 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.248824 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.248915 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.267610 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z 
is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.280031 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.298921 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.313158 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.328030 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.341038 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.350731 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.350769 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.350784 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.350803 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.350815 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.358306 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.375757 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.392077 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.405444 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.418920 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.431913 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.441672 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" event={"ID":"58f66b28-96d7-4c4f-b567-7f0917812e0b","Type":"ContainerStarted","Data":"05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.447511 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.448256 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.448315 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.448337 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.452872 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.452891 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.452902 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.452918 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.452928 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.461206 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd7
7d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.469808 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.471312 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.476188 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.487156 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.495353 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.507695 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.518258 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.530457 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.547892 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.555901 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.555928 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.555937 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.555951 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.555960 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.562187 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.577346 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.597327 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.610974 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.621606 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.631848 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.643295 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.657158 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.659050 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.659085 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.659093 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.659108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.659119 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.674542 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.688581 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.705164 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.717671 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.729136 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.742412 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.757669 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.761157 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.761214 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc 
kubenswrapper[4716]: I1209 15:09:03.761228 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.761247 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.761259 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.775738 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.789614 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.803358 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.815011 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.833416 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o
://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.863664 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.863999 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.864086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.864176 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.864275 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.966833 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.967286 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.967453 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.967719 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:03 crc kubenswrapper[4716]: I1209 15:09:03.967956 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:03Z","lastTransitionTime":"2025-12-09T15:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.070899 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.070957 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.070968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.070988 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.071000 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.160611 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.160675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.160685 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.160699 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.160709 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.173511 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:04Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.177500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.177528 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.177537 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.177550 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.177559 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.193653 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:04Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.198574 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.198639 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.198649 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.198666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.198674 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.213031 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.213013 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:04Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.213170 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.217419 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.217465 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.217474 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.217491 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.217501 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.229317 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:04Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.233000 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.233044 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.233056 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.233074 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.233086 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.245004 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:04Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:04 crc kubenswrapper[4716]: E1209 15:09:04.245160 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.246583 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.246614 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.246654 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.246671 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.246682 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.348936 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.349001 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.349012 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.349029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.349039 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.453522 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.453574 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.453585 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.453599 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.453616 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.556030 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.556078 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.556086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.556104 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.556112 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.658409 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.658445 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.658456 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.658471 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.658481 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.761410 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.761460 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.761473 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.761490 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.761502 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.864314 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.864375 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.864389 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.864406 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.864419 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.967525 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.967565 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.967574 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.967588 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:04 crc kubenswrapper[4716]: I1209 15:09:04.967598 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:04Z","lastTransitionTime":"2025-12-09T15:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.070587 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.070642 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.070652 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.070670 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.070681 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.173128 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.173174 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.173184 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.173202 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.173211 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.213499 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:05 crc kubenswrapper[4716]: E1209 15:09:05.213652 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.214153 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:05 crc kubenswrapper[4716]: E1209 15:09:05.214314 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.275817 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.275884 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.275897 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.275914 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.275929 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.378984 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.379027 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.379036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.379050 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.379060 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.482129 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.482171 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.482180 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.482196 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.482207 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.584884 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.584931 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.584941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.584958 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.584968 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.688435 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.688501 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.688512 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.688529 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.688538 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.799540 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.799578 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.799589 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.799608 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.799642 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.902485 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.902522 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.902531 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.902547 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:05 crc kubenswrapper[4716]: I1209 15:09:05.902557 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:05Z","lastTransitionTime":"2025-12-09T15:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.005945 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.005995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.006007 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.006030 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.006043 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.108747 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.108785 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.108795 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.108812 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.108823 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.211547 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.211586 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.211596 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.211612 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.211637 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.213042 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:06 crc kubenswrapper[4716]: E1209 15:09:06.213171 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.314362 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.314422 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.314447 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.314471 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.314488 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.417242 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.417283 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.417295 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.417311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.417321 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.458417 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/0.log" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.461156 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e" exitCode=1 Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.461198 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.463131 4716 scope.go:117] "RemoveContainer" containerID="439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.479502 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\
\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.493421 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.507882 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.520231 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.520276 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.520286 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.520302 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.520313 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.521900 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.12
6.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.536709 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"
host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.549429 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\"
:0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.562427 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.576570 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.592814 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.607753 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.623305 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.623350 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.623361 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.623382 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.623395 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.628169 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.640699 4716 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.651710 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.671964 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:06Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.725701 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.725752 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.725768 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.725790 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.725805 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.829208 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.829315 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.829337 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.829367 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.829387 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.932250 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.932319 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.932334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.932357 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.932372 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:06Z","lastTransitionTime":"2025-12-09T15:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:06 crc kubenswrapper[4716]: I1209 15:09:06.967057 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:09:06 crc kubenswrapper[4716]: E1209 15:09:06.967296 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:09:22.967261767 +0000 UTC m=+50.122005765 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.035285 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.035343 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.035360 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.035383 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.035395 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.067900 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.067961 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.067993 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.068029 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068105 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object 
"openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068111 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068247 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068249 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068300 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068318 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068313 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068303 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:23.068198174 +0000 UTC m=+50.222942162 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068265 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068420 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:23.068396709 +0000 UTC m=+50.223140747 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068455 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:23.068444821 +0000 UTC m=+50.223188929 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.068478 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:23.068469212 +0000 UTC m=+50.223213270 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.138718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.138756 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.138767 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.138783 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.138794 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.213320 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.213426 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.213545 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:07 crc kubenswrapper[4716]: E1209 15:09:07.213740 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.240892 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.240943 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.240955 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.240976 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.240992 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.344922 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.344977 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.344990 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.345009 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.345024 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.448559 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.448642 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.448662 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.448684 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.448699 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.468348 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/0.log" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.471589 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.488222 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.505135 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.520129 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.532266 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.566842 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.566880 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.566892 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.566935 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.566949 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.567482 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.580864 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP
\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.599019 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] 
Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.622644 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.639492 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.664194 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.668939 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.669039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.669049 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.669078 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.669089 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.679990 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.692331 4716 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.704608 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.725480 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:07Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.772379 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.772424 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.772436 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.772453 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.772464 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.874646 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.874691 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.874702 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.874720 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.874733 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.976695 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.976741 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.976767 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.976785 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:07 crc kubenswrapper[4716]: I1209 15:09:07.976795 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:07Z","lastTransitionTime":"2025-12-09T15:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.079497 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.079544 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.079560 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.079583 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.079598 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.182642 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.182685 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.182697 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.182716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.182732 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.212512 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:08 crc kubenswrapper[4716]: E1209 15:09:08.212725 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.285122 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.285197 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.285213 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.285233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.285258 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.387840 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.387902 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.387915 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.387932 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.387944 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.490523 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.490570 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.490579 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.490596 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.490615 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.592569 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.592610 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.592648 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.592668 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.592679 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.695289 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.695330 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.695341 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.695357 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.695368 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.798106 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.798145 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.798155 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.798170 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.798182 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.900608 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.900686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.900698 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.900718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.900730 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:08Z","lastTransitionTime":"2025-12-09T15:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.981748 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:09:08 crc kubenswrapper[4716]: I1209 15:09:08.993747 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:08Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.003421 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.003450 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.003458 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.003472 4716 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.003482 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.008215 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.040186 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a4
5e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.053695 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.064532 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.079176 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.093121 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.106227 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.106277 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.106287 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.106310 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.106323 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.107715 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.119151 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.131150 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.146090 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.159514 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.173701 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.191377 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.209167 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.209218 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.209233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.209275 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.209295 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.213518 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:09 crc kubenswrapper[4716]: E1209 15:09:09.213663 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.213710 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:09 crc kubenswrapper[4716]: E1209 15:09:09.213754 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.312316 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.312355 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.312366 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.312384 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.312396 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.334193 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7"] Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.334824 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.337100 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.338257 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.361335 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{
\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] 
Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\
\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.375776 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.387706 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ff607c0e-2a04-4599-b107-8ccbfed1d376-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.387798 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8vnh\" (UniqueName: \"kubernetes.io/projected/ff607c0e-2a04-4599-b107-8ccbfed1d376-kube-api-access-s8vnh\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.387859 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ff607c0e-2a04-4599-b107-8ccbfed1d376-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.387907 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ff607c0e-2a04-4599-b107-8ccbfed1d376-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.390515 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\
\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.412568 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.414799 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.414851 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.414863 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.414897 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.414911 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.425952 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.437566 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.452812 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.465554 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.478578 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.488907 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ff607c0e-2a04-4599-b107-8ccbfed1d376-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.488937 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ff607c0e-2a04-4599-b107-8ccbfed1d376-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.488972 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ff607c0e-2a04-4599-b107-8ccbfed1d376-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.488996 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8vnh\" (UniqueName: \"kubernetes.io/projected/ff607c0e-2a04-4599-b107-8ccbfed1d376-kube-api-access-s8vnh\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.490326 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ff607c0e-2a04-4599-b107-8ccbfed1d376-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.490414 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ff607c0e-2a04-4599-b107-8ccbfed1d376-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.494586 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ff607c0e-2a04-4599-b107-8ccbfed1d376-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.495342 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.508084 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8vnh\" (UniqueName: \"kubernetes.io/projected/ff607c0e-2a04-4599-b107-8ccbfed1d376-kube-api-access-s8vnh\") pod \"ovnkube-control-plane-749d76644c-7wtn7\" (UID: \"ff607c0e-2a04-4599-b107-8ccbfed1d376\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.510715 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.517454 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.517594 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.517688 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.517790 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.517864 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.524130 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.536395 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.549018 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.560192 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:09Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.621337 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.621393 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.621405 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.621422 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.621434 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.648715 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.723853 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.723900 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.723912 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.723931 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.723942 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.825474 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.825501 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.825509 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.825524 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.825533 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.927872 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.928330 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.928354 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.928384 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:09 crc kubenswrapper[4716]: I1209 15:09:09.928407 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:09Z","lastTransitionTime":"2025-12-09T15:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.030594 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.030661 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.030675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.030696 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.030711 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.133461 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.133502 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.133510 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.133527 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.133536 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.213893 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:10 crc kubenswrapper[4716]: E1209 15:09:10.214096 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.236175 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.236235 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.236256 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.236290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.236310 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.339114 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.339173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.339183 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.339199 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.339209 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.441553 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.441645 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.441660 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.441686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.441699 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.482824 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/1.log" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.483433 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/0.log" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.485966 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c" exitCode=1 Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.486026 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.486080 4716 scope.go:117] "RemoveContainer" containerID="439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.486820 4716 scope.go:117] "RemoveContainer" containerID="a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c" Dec 09 15:09:10 crc kubenswrapper[4716]: E1209 15:09:10.486969 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.487895 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" event={"ID":"ff607c0e-2a04-4599-b107-8ccbfed1d376","Type":"ContainerStarted","Data":"9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.487944 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" event={"ID":"ff607c0e-2a04-4599-b107-8ccbfed1d376","Type":"ContainerStarted","Data":"fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.487959 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" event={"ID":"ff607c0e-2a04-4599-b107-8ccbfed1d376","Type":"ContainerStarted","Data":"11bf69c323d297ce760086cbef52353a3bfd148e59836db72848f6ee1c3405dd"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.503482 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.514716 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.535541 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4
de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.543373 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.543650 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.543785 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.543916 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.544021 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.555764 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.570326 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.586670 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.598746 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:0
9Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.610205 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92eda
f5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.620768 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.632064 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.646066 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.646329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.646523 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.646533 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.646547 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.646556 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.661027 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.676369 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.689776 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.702337 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.714520 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.728339 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.742588 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.748853 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.748885 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.748894 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.748911 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.748929 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.757023 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.768735 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.779257 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.788547 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.805275 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4
de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.817312 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni
/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.826032 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-gpl2n"] Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.826570 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:10 crc kubenswrapper[4716]: E1209 15:09:10.826666 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.830439 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.842722 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.851851 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.851878 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.851887 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.851901 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.851912 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.854884 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.868339 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.880587 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.891762 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.901407 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wxrl\" (UniqueName: \"kubernetes.io/projected/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-kube-api-access-9wxrl\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.901437 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.903725 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.913535 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.930863 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 
6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":
\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.942070 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.952996 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.954671 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.954731 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.954747 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.954784 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.954798 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:10Z","lastTransitionTime":"2025-12-09T15:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.967291 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc 
kubenswrapper[4716]: I1209 15:09:10.979037 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.989830 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\
\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:10 crc kubenswrapper[4716]: I1209 15:09:10.999866 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:10Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.005298 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wxrl\" (UniqueName: \"kubernetes.io/projected/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-kube-api-access-9wxrl\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.005345 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:11 crc kubenswrapper[4716]: E1209 15:09:11.005539 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:11 crc kubenswrapper[4716]: E1209 15:09:11.005611 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:11.505585248 +0000 UTC m=+38.660329236 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.022426 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.036398 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wxrl\" (UniqueName: \"kubernetes.io/projected/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-kube-api-access-9wxrl\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.054130 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.057058 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.057256 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.057331 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.057417 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.057489 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.076612 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.090186 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.105562 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.115416 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.129393 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:11Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.160319 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.160371 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.160383 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.160396 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.160407 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.213197 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.213290 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:11 crc kubenswrapper[4716]: E1209 15:09:11.213745 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:11 crc kubenswrapper[4716]: E1209 15:09:11.213763 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.263265 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.263338 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.263348 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.263367 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.263377 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.367094 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.367150 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.367170 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.367210 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.367245 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.471145 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.471202 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.471216 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.471236 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.471247 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.492364 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/1.log" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.509640 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:11 crc kubenswrapper[4716]: E1209 15:09:11.509803 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:11 crc kubenswrapper[4716]: E1209 15:09:11.509905 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. 
No retries permitted until 2025-12-09 15:09:12.50987919 +0000 UTC m=+39.664623178 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.573869 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.573916 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.573929 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.573946 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.573959 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.676517 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.676615 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.676651 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.676674 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.676806 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.779153 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.779187 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.779198 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.779213 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.779224 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.881397 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.881452 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.881465 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.881484 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.881496 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.984579 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.984621 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.984650 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.984666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:11 crc kubenswrapper[4716]: I1209 15:09:11.984676 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:11Z","lastTransitionTime":"2025-12-09T15:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.087333 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.087391 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.087437 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.087459 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.087474 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.191006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.191058 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.191071 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.191093 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.191106 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.213412 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.213416 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:12 crc kubenswrapper[4716]: E1209 15:09:12.213559 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:12 crc kubenswrapper[4716]: E1209 15:09:12.213695 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.293486 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.293525 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.293539 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.293555 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.293565 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.396086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.396132 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.396148 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.396171 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.396184 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.499209 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.499270 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.499282 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.499311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.499323 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.518985 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:12 crc kubenswrapper[4716]: E1209 15:09:12.519195 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:12 crc kubenswrapper[4716]: E1209 15:09:12.519297 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:14.519271245 +0000 UTC m=+41.674015253 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.602697 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.602740 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.602751 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.602766 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.602777 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.705251 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.705285 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.705298 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.705314 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.705324 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.807889 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.807938 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.807950 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.807969 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.807983 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.910437 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.910470 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.910479 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.910493 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:12 crc kubenswrapper[4716]: I1209 15:09:12.910502 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:12Z","lastTransitionTime":"2025-12-09T15:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.013653 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.013708 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.013721 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.013739 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.013750 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.116465 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.116519 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.116534 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.116556 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.116572 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.213366 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.213320 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:13 crc kubenswrapper[4716]: E1209 15:09:13.213558 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:13 crc kubenswrapper[4716]: E1209 15:09:13.213951 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.219255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.219330 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.219355 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.219388 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.219412 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.227650 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.243463 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.275133 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a4
5e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping 
metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],
\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.296475 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.309057 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.322131 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.322162 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.322173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.322192 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.322206 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.324530 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.338124 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP
\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.354181 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"en
v-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.371518 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\
\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.386071 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.406819 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.419523 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.424337 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.424371 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.424381 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.424399 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.424436 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.431588 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.445776 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: 
I1209 15:09:13.456392 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.470981 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:13Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.526422 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.526478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.526490 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.526512 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.526525 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.628558 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.628682 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.628699 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.628716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.628725 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.731108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.731162 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.731174 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.731190 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.731202 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.835118 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.835185 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.835206 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.835230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.835246 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.938701 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.938771 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.938792 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.938817 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:13 crc kubenswrapper[4716]: I1209 15:09:13.938839 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:13Z","lastTransitionTime":"2025-12-09T15:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.042004 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.042081 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.042099 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.042123 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.042139 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.145914 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.145976 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.145986 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.146005 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.146014 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.213009 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.213009 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.213212 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.213336 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.249125 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.249183 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.249193 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.249211 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.249222 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.352086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.352140 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.352159 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.352185 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.352205 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.455315 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.455412 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.455440 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.455482 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.455502 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.477480 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.477528 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.477540 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.477580 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.477591 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.493389 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:14Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.498365 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.498464 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.498478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.498500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.498513 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.511237 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:14Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.514982 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.515015 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.515024 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.515042 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.515053 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.527279 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:14Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.531612 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.531663 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.531674 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.531692 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.531704 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.533273 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.533390 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.533460 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:18.533439742 +0000 UTC m=+45.688183730 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.545443 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ ... duplicate payload omitted; identical to the 15:09:14.511237 attempt above ... }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:14Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.550207 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.550244 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.550254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.550271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.550282 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:14Z","lastTransitionTime":"2025-12-09T15:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.564270 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:14Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:14 crc kubenswrapper[4716]: E1209 15:09:14.567386 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:09:14 crc kubenswrapper[4716]: I1209 15:09:14.570753 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" [... identical "Recording event message for node" (NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady) and "Node became not ready" cycles repeated every ~100 ms from 15:09:14.570 through 15:09:15.086 ...] Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.086397 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:15Z","lastTransitionTime":"2025-12-09T15:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.189391 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.189451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.189467 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.189492 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.189508 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:15Z","lastTransitionTime":"2025-12-09T15:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.212951 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.212975 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:15 crc kubenswrapper[4716]: E1209 15:09:15.213089 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:15 crc kubenswrapper[4716]: E1209 15:09:15.213163 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.292647 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.292696 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.292706 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.292724 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.292735 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:15Z","lastTransitionTime":"2025-12-09T15:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.395809 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.395888 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.395903 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.395924 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:15 crc kubenswrapper[4716]: I1209 15:09:15.395937 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:15Z","lastTransitionTime":"2025-12-09T15:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
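Both sandbox failures above, and all the ones that follow, are gated by the same runtime readiness check: the CRI reports NetworkReady=false because no CNI network configuration exists under /etc/kubernetes/cni/net.d/ yet, so the kubelet refuses to create new pod sandboxes and re-queues the pods. A minimal sketch of the directory scan behind "no CNI configuration file in ..." (modeled on the libcni/ocicni behavior of accepting .conf, .conflist, and .json files; the helper is illustrative, not CRI-O source):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfFiles lists candidate CNI config files in dir; an empty result is
// what keeps the runtime at NetworkReady=false.
func cniConfFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var confs []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			confs = append(confs, filepath.Join(dir, e.Name()))
		}
	}
	if len(confs) == 0 {
		return nil, fmt.Errorf("no CNI configuration file in %s. Has your network provider started?", dir)
	}
	return confs, nil
}

func main() {
	if _, err := cniConfFiles("/etc/kubernetes/cni/net.d/"); err != nil {
		fmt.Println(err) // same message class as the log above
	}
}

Once the cluster network provider writes its config into that directory, the runtime flips to NetworkReady=true and these queued sandboxes are created on the next sync.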
Has your network provider started?"} [... identical NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady status cycles repeated every ~100 ms from 15:09:15.498 through 15:09:16.014 ...] Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.014478 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:16Z","lastTransitionTime":"2025-12-09T15:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.117241 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.117300 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.117313 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.117334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.117349 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:16Z","lastTransitionTime":"2025-12-09T15:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.213234 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:16 crc kubenswrapper[4716]: E1209 15:09:16.213374 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.213234 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:16 crc kubenswrapper[4716]: E1209 15:09:16.213527 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.220701 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.220757 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.220776 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.220802 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.220818 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:16Z","lastTransitionTime":"2025-12-09T15:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.324001 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.324054 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.324066 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.324088 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.324109 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:16Z","lastTransitionTime":"2025-12-09T15:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} [... identical NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady status cycles repeated every ~100 ms from 15:09:16.426 through 15:09:16.941 ...] Dec 09 15:09:16 crc kubenswrapper[4716]: I1209 15:09:16.941250 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:16Z","lastTransitionTime":"2025-12-09T15:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.047174 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.047243 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.047255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.047269 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.047279 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:17Z","lastTransitionTime":"2025-12-09T15:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.149995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.150038 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.150046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.150060 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.150069 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:17Z","lastTransitionTime":"2025-12-09T15:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.213063 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:17 crc kubenswrapper[4716]: E1209 15:09:17.213224 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.213289 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:17 crc kubenswrapper[4716]: E1209 15:09:17.213421 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.252215 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.252821 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.252836 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.252860 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.252873 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:17Z","lastTransitionTime":"2025-12-09T15:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.355406 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.355441 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.355456 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.355474 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.355486 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:17Z","lastTransitionTime":"2025-12-09T15:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} [... identical NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady status cycles repeated every ~100 ms from 15:09:17.458 through 15:09:17.971 ...] Dec 09 15:09:17 crc kubenswrapper[4716]: I1209 15:09:17.971817 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:17Z","lastTransitionTime":"2025-12-09T15:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.074572 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.074611 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.074656 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.074675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.074685 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.177071 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.177118 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.177130 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.177146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.177156 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.213461 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.213491 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:18 crc kubenswrapper[4716]: E1209 15:09:18.213606 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:18 crc kubenswrapper[4716]: E1209 15:09:18.213801 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.279684 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.279727 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.279739 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.279759 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.279771 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.383203 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.383258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.383277 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.383299 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.383315 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.486236 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.486285 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.486295 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.486311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.486336 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.574880 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:18 crc kubenswrapper[4716]: E1209 15:09:18.575046 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:18 crc kubenswrapper[4716]: E1209 15:09:18.575145 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:26.575122242 +0000 UTC m=+53.729866230 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.588812 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.588852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.588863 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.588879 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.588892 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.691978 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.692005 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.692013 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.692026 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:18 crc kubenswrapper[4716]: I1209 15:09:18.692036 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:18Z","lastTransitionTime":"2025-12-09T15:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
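This mount failure is scheduled, not dropped: each failed MountVolume attempt for the same volume re-arms a per-operation exponential backoff, and "durationBeforeRetry 8s" with a retry at 15:09:26 is consistent with a 500 ms initial delay doubled on every failure (0.5 s, 1 s, 2 s, 4 s, 8 s, capped on the order of two minutes). The "not registered" cause typically clears on its own once the kubelet's secret manager registers openshift-multus/metrics-daemon-secret for this pod. A minimal sketch of that retry schedule (initial delay and cap are assumed values in the style of kubelet's exponential backoff, not quoted from its source):

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond                // assumed initial delay
	const maxDelay = 2*time.Minute + 2*time.Second // assumed cap
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("attempt %d: durationBeforeRetry %v\n", attempt, delay)
		delay *= 2 // double after each failed attempt
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	// attempt 5 prints "durationBeforeRetry 8s", matching the log entry.
}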
Has your network provider started?"} [... identical NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady status cycles repeated every ~100 ms from 15:09:18.794 through 15:09:19.000 ...] Dec 09 15:09:19 crc kubenswrapper[4716]: I1209 15:09:19.000215 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:19Z","lastTransitionTime":"2025-12-09T15:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Dec 09 15:09:19 crc kubenswrapper[4716]: I1209 15:09:19.212601 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:09:19 crc kubenswrapper[4716]: I1209 15:09:19.212748 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:09:19 crc kubenswrapper[4716]: E1209 15:09:19.212885 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:09:19 crc kubenswrapper[4716]: E1209 15:09:19.212977 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:09:20 crc kubenswrapper[4716]: I1209 15:09:20.213657 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:20 crc kubenswrapper[4716]: I1209 15:09:20.213710 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:09:20 crc kubenswrapper[4716]: E1209 15:09:20.213876 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:09:20 crc kubenswrapper[4716]: E1209 15:09:20.213933 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:09:21 crc kubenswrapper[4716]: I1209 15:09:21.213342 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:09:21 crc kubenswrapper[4716]: E1209 15:09:21.213520 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:09:21 crc kubenswrapper[4716]: I1209 15:09:21.213609 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:09:21 crc kubenswrapper[4716]: E1209 15:09:21.213778 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.163947 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.175719 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.180886 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.192142 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.192182 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.192193 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.192210 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.192223 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.200647 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.213137 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.213128 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:22 crc kubenswrapper[4716]: E1209 15:09:22.213306 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:22 crc kubenswrapper[4716]: E1209 15:09:22.213875 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.214222 4716 scope.go:117] "RemoveContainer" containerID="a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.221563 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.239217 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.256513 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.1
26.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.270931 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.286133 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.294791 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.294839 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.294872 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.294892 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.294905 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.300972 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.319972 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://439f6940ba3b88eb74bdac4c72b4da0981faebaa60856c84f029517e5689a11e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:06Z\\\",\\\"message\\\":\\\"6 6027 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587386 6027 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587475 6027 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1209 15:09:05.587576 6027 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1209 15:09:05.587853 6027 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:05.587873 6027 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1209 15:09:05.587917 6027 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1209 15:09:05.587940 6027 factory.go:656] Stopping watch factory\\\\nI1209 15:09:05.587957 6027 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1209 15:09:05.587961 6027 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:05.588020 6027 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true 
skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20994
82919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.333891 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.348293 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.363459 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.377058 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.394336 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.396952 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.396969 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.396978 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.396993 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.397005 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.407582 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.421022 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 
15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.432411 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.442373 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.456365 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.469038 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.484141 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.499006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.499057 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.499070 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.499090 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.499104 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.500268 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc 
kubenswrapper[4716]: I1209 15:09:22.513327 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.533378 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\"
,\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"stat
e\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.535497 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/1.log" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.538946 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.539656 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.554600 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.570969 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.588868 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.601559 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.601586 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.601595 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.601609 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.601620 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.609382 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.622501 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.635785 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.648598 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.658550 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.676199 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4
de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.693637 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f
480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.704342 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.704399 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.704411 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.704432 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.704445 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.708560 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.726794 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.741954 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.754482 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.769027 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.780855 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b827
99488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.793096 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.805578 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.807093 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.807131 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.807141 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.807158 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.807168 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.821562 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.837209 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.857392 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.1
26.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.868650 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.882897 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
2-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.896400 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.909559 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.909593 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.909604 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.909639 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.909652 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:22Z","lastTransitionTime":"2025-12-09T15:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.910148 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:22 crc kubenswrapper[4716]: I1209 15:09:22.923856 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:22Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.011992 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.012027 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.012039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.012053 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.012062 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.024745 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.024880 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:09:55.024852581 +0000 UTC m=+82.179596579 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.114378 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.114466 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.114494 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.114524 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.114542 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.126169 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.126211 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.126243 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.126263 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126305 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126376 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126404 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:55.126375361 +0000 UTC m=+82.281119389 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126435 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:55.126418672 +0000 UTC m=+82.281162710 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126471 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126528 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126546 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126471 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126615 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:55.126591397 +0000 UTC m=+82.281335435 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126645 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126662 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.126694 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:55.12668454 +0000 UTC m=+82.281428598 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.213678 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.213706 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.213906 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.213999 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.216637 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.216663 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.216675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.216689 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.216701 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
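The durationBeforeRetry 32s in the volume errors above is consistent with exponential backoff on repeated failures of the same volume operation: each retry roughly doubles the wait. A minimal sketch in Python, assuming an initial delay of 500ms, a doubling factor, and a cap of 2m2s (assumed illustrative values, not constants read from this log):

import itertools

def backoff_durations(initial=0.5, factor=2.0, cap=122.0):
    # Yield successive retry delays in seconds: initial, initial*factor, ...
    # clamped at cap. All three parameters are assumptions for illustration.
    delay = initial
    while True:
        yield min(delay, cap)
        delay *= factor

print(list(itertools.islice(backoff_durations(), 10)))
# -> [0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 122.0, 122.0]
# Under these assumptions a 32s delay corresponds to the seventh
# consecutive failure of the same operation.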
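For triage, the retry deadlines can be pulled out of entries in this format with a short script. The pattern below matches only the nestedpendingoperations lines visible in this log; it is a sketch, not a general journald parser, and triage.py is a hypothetical filename:

import re
import sys

# Matches lines like:
#   ... failed. No retries permitted until 2025-12-09 15:09:55.024852581 +0000 UTC
#   m=+82.179596579 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed
#   for volume "nginx-conf" ...
RETRY = re.compile(
    r'No retries permitted until (?P<until>\S+ \S+)'
    r'.*?\(durationBeforeRetry (?P<delay>\w+)\)\. '
    r'Error: (?P<op>\S+) failed for volume "(?P<volume>[^"]+)"'
)

for line in sys.stdin:
    m = RETRY.search(line)
    if m:
        print(f"{m['op']:<24} {m['volume']:<36} retry in {m['delay']} at {m['until']}")

Run against this artifact (python3 triage.py < kubelet.log) to list each backed-off volume operation with its next permitted retry time.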
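Every "Failed to update status for pod" entry from here on fails for the same reason: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate whose NotAfter, 2025-08-24T17:21:41Z, is roughly 107 days before the node clock, 2025-12-09T15:09:23Z, so TLS verification rejects the connection before any patch is attempted. The verifier's validity check reduces to a date comparison; a minimal sketch using the two timestamps from the log:

from datetime import datetime, timezone

# Both timestamps are taken verbatim from the x509 errors in this log.
not_after = datetime(2025, 8, 24, 17, 21, 41, tzinfo=timezone.utc)  # cert NotAfter
now = datetime(2025, 12, 9, 15, 9, 23, tzinfo=timezone.utc)         # node clock

# A chain is rejected when the current time falls outside [NotBefore, NotAfter].
print(now > not_after)   # True -> "certificate has expired or is not yet valid"
print(now - not_after)   # 106 days, 21:47:42 past expiry

Because the failure is in certificate validation, every status patch below repeats the identical error until the webhook certificate is rotated or the clock skew is resolved.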
Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.232608 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.248906 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.266083 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.278517 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.297485 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.313750 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.319174 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.319251 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.319263 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.319280 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.319297 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.328842 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 
15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.345182 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.358495 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.373103 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 
2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.385592 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.399870 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.420887 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.423212 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.423255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc 
kubenswrapper[4716]: I1209 15:09:23.423269 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.423290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.423302 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.432975 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.447838 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.462523 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.481334 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4
de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.525658 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.525701 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.525714 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.525733 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.525744 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.543682 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/2.log" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.544200 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/1.log" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.546720 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771" exitCode=1 Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.546744 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.546789 4716 scope.go:117] "RemoveContainer" containerID="a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.547497 4716 scope.go:117] "RemoveContainer" containerID="86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771" Dec 09 15:09:23 crc kubenswrapper[4716]: E1209 15:09:23.547718 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.571826 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f
480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6a86c49edcf74a5a36cb0f2b4c7f1493a03b9a45e09e71e6cb3963964c14a7c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"message\\\":\\\"y-operators]} name:Service_openshift-marketplace/community-operators_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.189:50051:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d389393c-7ba9-422c-b3f5-06e391d537d2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:07.987135 6151 admin_network_policy_controller.go:451] Adding Namespace in Admin Network Policy controller openshift-ingress-canary\\\\nI1209 15:09:07.987310 6151 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1209 15:09:07.987340 6151 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1209 15:09:07.987367 6151 factory.go:656] Stopping watch factory\\\\nI1209 15:09:07.987368 6151 handler.go:208] Removed *v1.Node event handler 2\\\\nI1209 15:09:07.987382 6151 ovnkube.go:599] Stopped ovnkube\\\\nI1209 15:09:07.987420 6151 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1209 15:09:07.987436 6151 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1209 15:09:07.987517 6151 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert 
Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/service
account\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.585900 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.598536 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.610544 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.624123 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b827
99488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.627845 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.627873 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.627881 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.627897 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.627906 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.638249 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.650049 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.659359 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.671952 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.685583 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.696368 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.711494 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.722795 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.730479 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.730740 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.730752 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.730772 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.730785 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.741344 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.753872 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.764781 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.773558 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:23Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.833571 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.833633 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.833645 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.833666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.833678 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.936411 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.936459 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.936472 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.936490 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:23 crc kubenswrapper[4716]: I1209 15:09:23.936502 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:23Z","lastTransitionTime":"2025-12-09T15:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.040358 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.040412 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.040430 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.040454 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.040472 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.142926 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.142989 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.143006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.143034 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.143052 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.213239 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.213354 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.213442 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.213548 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.247500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.247589 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.247602 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.247656 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.247679 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.351055 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.351117 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.351137 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.351164 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.351182 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.454516 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.454596 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.454613 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.454664 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.454679 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.552829 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/2.log" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.556370 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.556404 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.556418 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.556432 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.556441 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.557108 4716 scope.go:117] "RemoveContainer" containerID="86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.557324 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.577811 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f
480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.592109 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.604765 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 
15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.618057 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.632069 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.644935 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.658805 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.659941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.660062 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.660091 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.660131 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.660157 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.676249 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.692147 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.704333 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.712919 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.712960 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.712995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.713013 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.713025 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.720192 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-cr
c-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.727908 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.740258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.740319 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.740333 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.740361 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.740377 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.753431 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.765497 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.774135 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.774165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.774176 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.774191 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.774200 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.777253 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.793686 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.795425 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.797121 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.797149 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.797158 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.797172 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.797182 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.813727 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.815071 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.817485 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.817516 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.817526 4716 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.817542 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.817556 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.832385 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: E1209 15:09:24.832603 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.833315 4716 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.834771 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.834809 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.834835 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.834856 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.834869 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.846043 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:24Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.937505 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.937889 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.937970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.938049 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:24 crc kubenswrapper[4716]: I1209 15:09:24.938109 4716 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:24Z","lastTransitionTime":"2025-12-09T15:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.040499 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.040547 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.040561 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.040581 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.040595 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.143508 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.144222 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.144316 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.144411 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.144500 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.213417 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:25 crc kubenswrapper[4716]: E1209 15:09:25.213576 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.213905 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:25 crc kubenswrapper[4716]: E1209 15:09:25.214051 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.247126 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.247436 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.247503 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.247583 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.247702 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.350520 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.350901 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.350994 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.351104 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.351185 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.453703 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.453745 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.453756 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.453770 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.453779 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.556603 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.556709 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.556736 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.556768 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.556799 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.659614 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.659672 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.659686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.659708 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.659721 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.762426 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.762469 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.762482 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.762500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.762514 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.865017 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.865071 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.865083 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.865102 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.865115 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.967587 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.967652 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.967666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.967686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:25 crc kubenswrapper[4716]: I1209 15:09:25.967713 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:25Z","lastTransitionTime":"2025-12-09T15:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.070213 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.070258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.070274 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.070295 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.070311 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.172726 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.172772 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.172780 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.172798 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.172811 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.213420 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.213480 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:26 crc kubenswrapper[4716]: E1209 15:09:26.213792 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:26 crc kubenswrapper[4716]: E1209 15:09:26.213899 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.275287 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.275338 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.275351 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.275368 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.275378 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.378317 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.378356 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.378367 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.378385 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.378396 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.480827 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.480882 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.480895 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.480911 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.480923 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.583666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.583709 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.583719 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.583736 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.583747 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.662869 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:26 crc kubenswrapper[4716]: E1209 15:09:26.662992 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:26 crc kubenswrapper[4716]: E1209 15:09:26.663048 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:09:42.663032241 +0000 UTC m=+69.817776229 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.686674 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.686716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.686725 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.686739 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.686748 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.789009 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.789046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.789059 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.789075 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.789085 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.891407 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.891455 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.891467 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.891483 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.891493 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.994606 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.994706 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.994722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.994743 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:26 crc kubenswrapper[4716]: I1209 15:09:26.994758 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:26Z","lastTransitionTime":"2025-12-09T15:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.097273 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.097333 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.097342 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.097358 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.097368 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.200273 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.200325 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.200334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.200351 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.200360 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.212668 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:27 crc kubenswrapper[4716]: E1209 15:09:27.212787 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.212988 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:27 crc kubenswrapper[4716]: E1209 15:09:27.213062 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.303593 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.303673 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.303691 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.303715 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.303731 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.406326 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.406374 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.406386 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.406405 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.406417 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.509173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.509207 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.509215 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.509230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.509242 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.611604 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.611684 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.611698 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.611719 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.611733 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.715369 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.715452 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.715464 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.715482 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.715837 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.819196 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.819290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.819326 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.819357 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.819377 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.921992 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.922044 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.922063 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.922087 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:27 crc kubenswrapper[4716]: I1209 15:09:27.922105 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:27Z","lastTransitionTime":"2025-12-09T15:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.025046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.025124 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.025149 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.025181 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.025205 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.128508 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.128573 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.128601 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.128679 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.128702 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.213450 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.213549 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:28 crc kubenswrapper[4716]: E1209 15:09:28.213647 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:28 crc kubenswrapper[4716]: E1209 15:09:28.213739 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.231709 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.231747 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.231758 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.231774 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.231785 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.334275 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.334326 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.334338 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.334355 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.334368 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.437158 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.437213 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.437231 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.437252 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.437266 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.540033 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.540075 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.540087 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.540107 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.540119 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.642879 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.642920 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.642928 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.642943 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.642956 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.746675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.746734 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.746745 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.746763 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.746776 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.850319 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.850399 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.850421 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.850451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.850474 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.953884 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.953968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.953991 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.954019 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:28 crc kubenswrapper[4716]: I1209 15:09:28.954039 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:28Z","lastTransitionTime":"2025-12-09T15:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.057120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.057211 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.057238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.057287 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.057306 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.159704 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.159754 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.159763 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.159782 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.159791 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.212548 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.212603 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:29 crc kubenswrapper[4716]: E1209 15:09:29.212713 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:29 crc kubenswrapper[4716]: E1209 15:09:29.212806 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.261882 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.261919 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.261928 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.261969 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.261978 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.365167 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.365228 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.365240 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.365258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.365300 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.468665 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.468721 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.468731 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.468752 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.468766 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.575988 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.576046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.576062 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.576084 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.576099 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.678751 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.678795 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.678804 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.678852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.678886 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.782095 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.782169 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.782207 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.782228 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.782239 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.885287 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.885335 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.885344 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.885361 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.885371 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.988672 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.988732 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.988747 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.988769 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:29 crc kubenswrapper[4716]: I1209 15:09:29.988779 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:29Z","lastTransitionTime":"2025-12-09T15:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.091164 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.091218 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.091236 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.091256 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.091268 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.194403 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.194468 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.194486 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.194510 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.194529 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.213198 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.213257 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:30 crc kubenswrapper[4716]: E1209 15:09:30.213364 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:30 crc kubenswrapper[4716]: E1209 15:09:30.213587 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.297264 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.297325 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.297345 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.297368 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.297380 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.400269 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.400325 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.400342 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.400364 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.400382 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.502975 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.503006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.503014 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.503028 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.503036 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.605861 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.605898 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.605909 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.605925 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.605934 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.708174 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.708219 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.708230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.708246 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.708259 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.811392 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.811444 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.811456 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.811474 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.811491 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.913961 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.913995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.914004 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.914018 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:30 crc kubenswrapper[4716]: I1209 15:09:30.914028 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:30Z","lastTransitionTime":"2025-12-09T15:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.016900 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.016945 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.016954 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.016969 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.016982 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.118777 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.118842 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.118852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.118870 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.118881 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.213252 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:31 crc kubenswrapper[4716]: E1209 15:09:31.213386 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.213722 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:31 crc kubenswrapper[4716]: E1209 15:09:31.213936 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.220819 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.220864 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.220881 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.220902 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.220922 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.323960 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.324027 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.324039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.324059 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.324073 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.426803 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.426858 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.426873 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.426894 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.426907 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.529970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.530044 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.530079 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.530108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.530123 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.633221 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.633305 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.633328 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.633360 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.633383 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.735763 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.735826 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.735839 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.735857 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.735869 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.839001 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.839095 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.839120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.839168 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.839197 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.941908 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.941966 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.941977 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.941993 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:31 crc kubenswrapper[4716]: I1209 15:09:31.942003 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:31Z","lastTransitionTime":"2025-12-09T15:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.044612 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.044696 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.044707 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.044723 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.044735 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.148677 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.148722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.148734 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.148754 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.148765 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.213575 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.213598 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:32 crc kubenswrapper[4716]: E1209 15:09:32.213834 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:32 crc kubenswrapper[4716]: E1209 15:09:32.213987 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.252041 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.252120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.252132 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.252150 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.252162 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.354283 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.354328 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.354339 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.354358 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.354372 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.457250 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.457290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.457299 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.457316 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.457325 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.560969 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.561024 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.561038 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.561060 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.561073 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.664720 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.664778 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.664805 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.664830 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.664847 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.766838 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.767282 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.767439 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.767582 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.767873 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.871238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.871300 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.871315 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.871347 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.871363 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.973851 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.973897 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.973910 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.973928 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:32 crc kubenswrapper[4716]: I1209 15:09:32.973938 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:32Z","lastTransitionTime":"2025-12-09T15:09:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.077003 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.077044 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.077056 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.077075 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.077088 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.178658 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.178695 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.178711 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.178729 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.178740 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.213230 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.213288 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:33 crc kubenswrapper[4716]: E1209 15:09:33.213483 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:33 crc kubenswrapper[4716]: E1209 15:09:33.213716 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.240688 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f
480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.255681 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.266810 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.277486 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.281012 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.281044 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.281052 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.281067 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.281077 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.292217 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.307162 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.319416 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.330503 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.342147 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.357653 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.369296 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.383141 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.383203 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.383215 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.383248 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.383258 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.384392 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.395928 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.412419 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 
2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.425273 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.437637 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.448158 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:33Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.485287 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.485328 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.485345 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.485363 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.485376 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.587996 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.588041 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.588056 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.588075 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.588089 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.690934 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.690989 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.691005 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.691030 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.691048 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.793691 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.793755 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.793767 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.793788 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.793802 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.896484 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.896901 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.897137 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.897369 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:33 crc kubenswrapper[4716]: I1209 15:09:33.897644 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:33Z","lastTransitionTime":"2025-12-09T15:09:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.000786 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.001475 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.001695 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.001831 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.001951 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.104206 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.104546 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.104637 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.104731 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.104797 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.207336 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.207383 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.207398 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.207418 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.207434 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.212792 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.212880 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:34 crc kubenswrapper[4716]: E1209 15:09:34.212943 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:09:34 crc kubenswrapper[4716]: E1209 15:09:34.213065 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.310106 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.310162 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.310178 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.310199 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.310214 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.413230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.413602 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.413728 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.414064 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.414142 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.516668 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.516704 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.516715 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.516732 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.516743 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.619743 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.620285 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.620475 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.620718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.620943 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.724120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.724188 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.724212 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.724242 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.724269 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.827298 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.827377 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.827402 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.827434 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.827457 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.930109 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.930165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.930183 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.930204 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:34 crc kubenswrapper[4716]: I1209 15:09:34.930220 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:34Z","lastTransitionTime":"2025-12-09T15:09:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.033500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.033605 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.033662 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.033690 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.033710 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.123964 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.124006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.124018 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.124036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.124050 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.141934 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:35Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.147507 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.147589 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.147605 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.147665 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.147685 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.163734 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:35Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.168145 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.168202 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.168216 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.168233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.168247 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.184937 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:35Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.189068 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.189098 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.189126 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.189142 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.189151 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.200969 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:35Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.204273 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.204334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.204346 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.204363 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.204376 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.213578 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.213728 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.213879 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.213982 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.219018 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:35Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:35 crc kubenswrapper[4716]: E1209 15:09:35.219319 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.221128 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.221165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.221197 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.221217 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.221229 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.323989 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.324036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.324046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.324060 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.324070 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.429250 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.429315 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.429327 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.429350 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.429363 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.532323 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.532403 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.532428 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.532462 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.532485 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.635146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.635208 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.635223 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.635239 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.635254 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.737403 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.737463 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.737481 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.737503 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.737522 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.840732 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.840834 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.840879 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.840915 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.840942 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.943035 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.943074 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.943086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.943105 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:35 crc kubenswrapper[4716]: I1209 15:09:35.943117 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:35Z","lastTransitionTime":"2025-12-09T15:09:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.046229 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.046917 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.046964 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.046990 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.047002 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.150043 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.150116 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.150133 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.150160 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.150177 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.212802 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.213218 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:36 crc kubenswrapper[4716]: E1209 15:09:36.213444 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:36 crc kubenswrapper[4716]: E1209 15:09:36.213535 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.213672 4716 scope.go:117] "RemoveContainer" containerID="86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771" Dec 09 15:09:36 crc kubenswrapper[4716]: E1209 15:09:36.213903 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.252198 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.252249 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.252260 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.252276 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.252284 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.355585 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.355667 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.355685 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.355704 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.355716 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.458568 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.458616 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.458644 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.458663 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.458677 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.561253 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.561297 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.561311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.561328 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.561339 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.663647 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.663689 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.663699 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.663716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.663725 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.766984 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.767031 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.767039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.767056 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.767065 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.869932 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.869972 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.869985 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.870003 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.870014 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.972385 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.972452 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.972465 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.972488 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:36 crc kubenswrapper[4716]: I1209 15:09:36.972501 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:36Z","lastTransitionTime":"2025-12-09T15:09:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.074579 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.074642 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.074657 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.074675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.074687 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.177693 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.177783 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.177834 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.177863 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.177883 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.212747 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.212780 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:37 crc kubenswrapper[4716]: E1209 15:09:37.212898 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:37 crc kubenswrapper[4716]: E1209 15:09:37.213124 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.280424 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.280474 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.280504 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.280526 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.280540 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.383196 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.383254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.383268 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.383288 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.383323 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.485851 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.485941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.485957 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.485995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.486007 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.587835 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.587866 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.587875 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.587889 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.587897 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.690394 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.690454 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.690469 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.690488 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.690498 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.792833 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.792887 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.792922 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.792943 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.792957 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.895555 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.895595 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.895603 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.895638 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.895649 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.999015 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.999048 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.999059 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.999072 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:37 crc kubenswrapper[4716]: I1209 15:09:37.999080 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:37Z","lastTransitionTime":"2025-12-09T15:09:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.101941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.101985 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.101997 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.102015 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.102030 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.204150 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.204182 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.204194 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.204207 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.204217 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.212498 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:38 crc kubenswrapper[4716]: E1209 15:09:38.212632 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.212682 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:38 crc kubenswrapper[4716]: E1209 15:09:38.212801 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.306243 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.306271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.306281 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.306295 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.306304 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.409252 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.409288 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.409296 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.409312 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.409321 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.511808 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.511851 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.511862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.511877 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.511888 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.614525 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.614576 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.614588 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.614605 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.614615 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.717300 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.717332 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.717340 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.717353 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.717362 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.819959 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.820000 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.820010 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.820025 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.820034 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.922233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.922267 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.922276 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.922290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:38 crc kubenswrapper[4716]: I1209 15:09:38.922298 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:38Z","lastTransitionTime":"2025-12-09T15:09:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.024749 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.024797 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.024810 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.024827 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.024843 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.127024 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.127065 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.127076 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.127095 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.127105 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.212927 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.212959 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:39 crc kubenswrapper[4716]: E1209 15:09:39.213067 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:39 crc kubenswrapper[4716]: E1209 15:09:39.213219 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.229862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.229952 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.229965 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.229989 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.230002 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.332975 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.333024 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.333035 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.333051 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.333061 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.435719 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.435773 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.435783 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.435796 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.435807 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.538227 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.538271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.538284 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.538304 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.538329 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.640500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.640544 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.640555 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.640573 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.640584 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.743447 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.743497 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.743515 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.743533 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.743545 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.846074 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.846116 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.846125 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.846143 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.846153 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.948361 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.948433 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.948444 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.948462 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:39 crc kubenswrapper[4716]: I1209 15:09:39.948471 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:39Z","lastTransitionTime":"2025-12-09T15:09:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.051701 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.051763 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.051777 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.051797 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.051812 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.154100 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.154146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.154155 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.154171 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.154188 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.213197 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.213259 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:40 crc kubenswrapper[4716]: E1209 15:09:40.213376 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:40 crc kubenswrapper[4716]: E1209 15:09:40.213544 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.257036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.257093 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.257106 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.257123 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.257135 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.360245 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.360295 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.360305 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.360321 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.360331 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.462280 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.462327 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.462355 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.462373 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.462384 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.566072 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.566129 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.566146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.566165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.566177 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.668983 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.669065 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.669079 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.669101 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.669115 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.771495 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.771567 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.771585 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.771603 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.771613 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.874052 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.874100 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.874110 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.874126 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.874135 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.977452 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.977504 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.977515 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.977534 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:40 crc kubenswrapper[4716]: I1209 15:09:40.977546 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:40Z","lastTransitionTime":"2025-12-09T15:09:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.080082 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.080133 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.080142 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.080161 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.080172 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.183336 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.183386 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.183398 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.183416 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.183427 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.213576 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.213752 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:41 crc kubenswrapper[4716]: E1209 15:09:41.213759 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:41 crc kubenswrapper[4716]: E1209 15:09:41.213972 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.285833 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.285896 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.285912 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.285931 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.285943 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.389899 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.389941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.389951 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.389965 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.389989 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.493665 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.493725 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.493737 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.493754 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.493778 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.596596 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.596662 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.596683 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.596708 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.596721 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.699074 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.699110 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.699120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.699134 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.699143 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.801311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.801392 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.801413 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.801450 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.801464 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.903380 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.903416 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.903424 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.903438 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:41 crc kubenswrapper[4716]: I1209 15:09:41.903447 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:41Z","lastTransitionTime":"2025-12-09T15:09:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.006655 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.006704 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.006716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.006737 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.006750 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.109950 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.109999 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.110009 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.110027 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.110039 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.212678 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.212697 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:42 crc kubenswrapper[4716]: E1209 15:09:42.212819 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:42 crc kubenswrapper[4716]: E1209 15:09:42.212939 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.212953 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.212991 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.213000 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.213019 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.213028 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.316241 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.316282 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.316291 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.316306 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.316317 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.418972 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.419023 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.419034 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.419053 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.419066 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.521600 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.521656 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.521666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.521680 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.521690 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.616535 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/0.log" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.616583 4716 generic.go:334] "Generic (PLEG): container finished" podID="38b4e174-ba72-4a0f-9eed-f2ce970c0afc" containerID="b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4" exitCode=1 Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.616614 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerDied","Data":"b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.617033 4716 scope.go:117] "RemoveContainer" containerID="b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.627724 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.627815 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.627833 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.627853 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.627867 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.635780 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.650160 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.663194 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:42 crc kubenswrapper[4716]: E1209 15:09:42.663329 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:42 crc kubenswrapper[4716]: E1209 15:09:42.663399 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:10:14.663380023 +0000 UTC m=+101.818124011 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.663530 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.675465 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.689070 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.703871 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.717518 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.729790 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.730757 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.730852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.730862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.730877 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.730887 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.743371 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.757763 4716 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.769389 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.782512 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.795377 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.809985 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 
2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.823155 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.833068 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.833121 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.833132 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.833148 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.833161 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.837287 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.849516 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:42Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.935436 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.935483 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.935496 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.935510 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:42 crc kubenswrapper[4716]: I1209 15:09:42.935520 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:42Z","lastTransitionTime":"2025-12-09T15:09:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.038453 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.038499 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.038508 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.038524 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.038533 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.141095 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.141154 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.141167 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.141186 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.141497 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.213253 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.213348 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:43 crc kubenswrapper[4716]: E1209 15:09:43.213598 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:43 crc kubenswrapper[4716]: E1209 15:09:43.213807 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.227733 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0
bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.241063 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.244298 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.244354 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.244367 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.244386 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.244398 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.256112 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.268682 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.283641 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.298069 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.313659 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.328130 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.340780 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.346872 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.346947 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.346963 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.346986 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.347001 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.354870 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.370446 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.383838 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.399744 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.411608 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.423254 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.432758 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.449268 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.449316 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.449329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.449347 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.449360 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.458936 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f
480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.551304 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.551339 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.551348 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.551362 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.551371 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.622330 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/0.log" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.622416 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerStarted","Data":"a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.634090 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.648335 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.653856 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.654319 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.654406 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.654487 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.654584 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.672146 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.686403 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.698310 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.714417 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.727432 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.738925 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.753991 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.757018 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.757503 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.757607 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.757722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.757809 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.768014 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.781487 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.797484 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.809053 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.823994 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"
startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"
192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.837457 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.851174 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt
\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.860478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.860558 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.860580 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.860611 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.860676 4716 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.862827 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:43Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.963544 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.963593 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.963609 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.963647 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:43 crc kubenswrapper[4716]: I1209 15:09:43.963663 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:43Z","lastTransitionTime":"2025-12-09T15:09:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.066774 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.066838 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.066852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.066873 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.066886 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.169749 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.169793 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.169805 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.169822 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.169833 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.213530 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.213611 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:09:44 crc kubenswrapper[4716]: E1209 15:09:44.213687 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:09:44 crc kubenswrapper[4716]: E1209 15:09:44.213760 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.272539 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.272919 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.273019 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.273130 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.273227 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.376006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.376148 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.376165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.376186 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.376207 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.479315 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.479375 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.479388 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.479409 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.479423 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.582008 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.582639 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.582737 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.582829 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.582909 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.685258 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.685634 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.685714 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.685846 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.685927 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.787968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.787998 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.788006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.788020 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.788028 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.890059 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.890094 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.890106 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.890123 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.890133 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.992716 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.992760 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.992768 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.992782 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:44 crc kubenswrapper[4716]: I1209 15:09:44.992790 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:44Z","lastTransitionTime":"2025-12-09T15:09:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.094554 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.094597 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.094609 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.094641 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.094652 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.197438 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.197481 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.197489 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.197504 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.197513 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.212709 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.212790 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.212836 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.212927 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.241109 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.241144 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.241154 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.241168 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.241178 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.254477 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:45Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.258575 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.258605 4716 kubelet_node_status.go:724] "Recording event message for node"
node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.258614 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.258644 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.258656 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.270312 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:45Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.273838 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.274032 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.274116 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.274197 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.274275 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.286587 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:45Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.290752 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.290791 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.290802 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.290819 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.290829 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.303835 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:45Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.307945 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.307987 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.307997 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.308014 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.308026 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.320903 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:45Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:45 crc kubenswrapper[4716]: E1209 15:09:45.321059 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.322934 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.322985 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.322997 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.323016 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.323035 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.427265 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.427317 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.427329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.427351 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.427363 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.529957 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.529999 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.530011 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.530028 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.530038 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.631686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.631717 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.631727 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.631742 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.631754 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.734392 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.734422 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.734432 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.734446 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.734456 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.836864 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.836909 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.836921 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.836938 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.836949 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.939875 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.939928 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.939946 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.939963 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:45 crc kubenswrapper[4716]: I1209 15:09:45.939973 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:45Z","lastTransitionTime":"2025-12-09T15:09:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.042346 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.042400 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.042413 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.042433 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.042447 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.145157 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.145204 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.145215 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.145235 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.145262 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.213349 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.213479 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:09:46 crc kubenswrapper[4716]: E1209 15:09:46.213522 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:09:46 crc kubenswrapper[4716]: E1209 15:09:46.213712 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.247917 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.247964 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.247976 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.247995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.248010 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.350569 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.350639 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.350654 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.350673 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.350688 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.453961 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.454026 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.454038 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.454060 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.454079 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.556535 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.556584 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.556599 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.556616 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.556650 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.658805 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.658834 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.658842 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.658856 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.658865 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.761898 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.761967 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.762034 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.762058 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.762071 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.866311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.866358 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.866372 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.866389 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.866585 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.969199 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.969243 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.969259 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.969284 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:46 crc kubenswrapper[4716]: I1209 15:09:46.969302 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:46Z","lastTransitionTime":"2025-12-09T15:09:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.072549 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.072601 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.072616 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.072682 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.072698 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.175516 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.175545 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.175555 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.175569 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.175594 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.212959 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.213012 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:09:47 crc kubenswrapper[4716]: E1209 15:09:47.213149 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:09:47 crc kubenswrapper[4716]: E1209 15:09:47.213731 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.213806 4716 scope.go:117] "RemoveContainer" containerID="86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.278740 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.278795 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.278810 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.278831 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.278847 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.381783 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.381806 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.381815 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.381829 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.381837 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.485038 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.485088 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.485100 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.485119 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.485131 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.587694 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.587768 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.587797 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.587832 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.587857 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.635254 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/2.log"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.637593 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"}
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.639115 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4"
Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.658255 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99
c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.672383 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.685973 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 
2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.690111 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.690151 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.690163 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.690183 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.690194 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.698478 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.712926 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.728037 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.1
26.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.739042 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.751763 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.764854 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.790688 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.792160 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.792217 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.792234 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.792254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.792267 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.805294 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.817845 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.830502 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.842125 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.854318 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.864864 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.875486 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:47Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.894081 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.894369 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.894430 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.894498 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.894562 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.997788 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.997852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.997871 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.997895 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:47 crc kubenswrapper[4716]: I1209 15:09:47.997913 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:47Z","lastTransitionTime":"2025-12-09T15:09:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.100902 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.101213 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.101392 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.101516 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.101575 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.204968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.205029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.205046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.205070 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.205088 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.213614 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:48 crc kubenswrapper[4716]: E1209 15:09:48.213945 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.213654 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:48 crc kubenswrapper[4716]: E1209 15:09:48.214152 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.307224 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.307834 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.307868 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.307890 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.307903 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.410335 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.410395 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.410412 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.410437 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.410457 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.513968 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.514017 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.514029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.514045 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.514058 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.616748 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.616801 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.616820 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.616855 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.616873 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.642173 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/3.log" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.642939 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/2.log" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.645584 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" exitCode=1 Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.645632 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.645666 4716 scope.go:117] "RemoveContainer" containerID="86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.646297 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:09:48 crc kubenswrapper[4716]: E1209 15:09:48.646429 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.665214 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.676024 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.700119 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4
de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://86ae7284d29b1843f9a37270d53192c39c9d582f480af9df7cec9ae6a027f771\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:23Z\\\",\\\"message\\\":\\\"map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997308 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:22.997228 6368 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1209 15:09:22.997410 6368 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1209 15:09:22.997445 6368 ovnkube.go:137] failed to run ovnkube: [failed to start 
network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:48Z\\\",\\\"message\\\":\\\"ng zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1209 15:09:48.032485 6781 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:48.032503 6781 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI1209 15:09:48.032508 6781 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF1209 15:09:48.032185 6781 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error 
occurred:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.713611 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z 
[verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.719113 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.719167 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.719178 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.719195 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc 
kubenswrapper[4716]: I1209 15:09:48.719207 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.728123 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.740670 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run
/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.752507 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"contain
erID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.764360 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.773949 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.782013 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.797030 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.811690 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.824115 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.824232 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc 
kubenswrapper[4716]: I1209 15:09:48.824247 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.824270 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.824285 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.825968 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.841528 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.856472 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.871426 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 
2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.885089 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:48Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.927159 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.927200 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.927209 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.927226 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:48 crc kubenswrapper[4716]: I1209 15:09:48.927236 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:48Z","lastTransitionTime":"2025-12-09T15:09:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.029396 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.029451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.029462 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.029476 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.029485 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.132458 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.132498 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.132511 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.132528 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.132539 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.213610 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.213701 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:49 crc kubenswrapper[4716]: E1209 15:09:49.213804 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:49 crc kubenswrapper[4716]: E1209 15:09:49.213932 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.235575 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.235660 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.235675 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.235700 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.235714 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.338518 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.338574 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.338586 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.338612 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.338644 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.441215 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.441263 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.441275 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.441292 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.441305 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.547545 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.547700 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.547750 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.547790 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.547801 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.649846 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.649902 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.649918 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.649941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.649958 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.651723 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/3.log" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.654862 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:09:49 crc kubenswrapper[4716]: E1209 15:09:49.655064 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.668494 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.680145 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.694371 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.1
26.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.706019 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.719546 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
2-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.731441 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.746139 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 
2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.752465 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.752501 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.752510 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.752525 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.752536 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.760347 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.770574 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.787865 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4
de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:48Z\\\",\\\"message\\\":\\\"ng zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1209 15:09:48.032485 6781 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:48.032503 6781 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI1209 15:09:48.032508 6781 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF1209 15:09:48.032185 6781 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error 
occurred:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.798228 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.809874 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.820675 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.832897 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.844927 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.854832 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.854880 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.854888 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.854903 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.854913 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.858904 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.869591 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:49Z is after 2025-08-24T17:21:41Z"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.958033 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.958083 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.958093 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.958112 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:49 crc kubenswrapper[4716]: I1209 15:09:49.958121 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:49Z","lastTransitionTime":"2025-12-09T15:09:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.060407 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.060451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.060466 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.060486 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.060498 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.163189 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.163239 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.163252 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.163268 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.163278 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.213369 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.213428 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:09:50 crc kubenswrapper[4716]: E1209 15:09:50.213598 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:50 crc kubenswrapper[4716]: E1209 15:09:50.213747 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.266449 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.266517 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.266544 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.266576 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.266602 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.370282 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.370336 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.370352 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.370371 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.370386 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.472674 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.472718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.472732 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.472753 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.472767 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.576028 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.576070 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.576083 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.576105 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.576118 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.679938 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.680306 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.680407 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.680601 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.680779 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.783975 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.784243 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.784313 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.784391 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.784459 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.886978 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.887011 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.887023 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.887039 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.887051 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.990068 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.990147 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.990173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.990230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:50 crc kubenswrapper[4716]: I1209 15:09:50.990257 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:50Z","lastTransitionTime":"2025-12-09T15:09:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.093833 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.093893 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.093911 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.093935 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.093956 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.197173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.197225 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.197236 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.197252 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.197260 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.213708 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.213785 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:51 crc kubenswrapper[4716]: E1209 15:09:51.214018 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:51 crc kubenswrapper[4716]: E1209 15:09:51.214096 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.237111 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.300118 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.300184 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.300197 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.300214 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.300226 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.403722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.403785 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.403807 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.403836 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.403855 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.507346 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.507417 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.507441 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.507472 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.507492 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.610795 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.610842 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.610852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.610871 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.610886 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.714463 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.714646 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.714672 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.714697 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.714717 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.818067 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.818116 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.818128 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.818146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.818158 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.921146 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.921195 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.921210 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.921233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:51 crc kubenswrapper[4716]: I1209 15:09:51.921247 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:51Z","lastTransitionTime":"2025-12-09T15:09:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.023783 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.023853 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.023863 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.023884 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.023893 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.126927 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.126974 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.126992 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.127010 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.127025 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.213215 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.213288 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:52 crc kubenswrapper[4716]: E1209 15:09:52.213359 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:52 crc kubenswrapper[4716]: E1209 15:09:52.213434 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.229075 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.229136 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.229147 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.229164 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.229176 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.331334 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.331378 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.331388 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.331406 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.331417 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.434914 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.434987 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.435011 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.435043 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.435067 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.537949 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.538003 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.538019 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.538037 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.538048 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.640203 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.640255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.640270 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.640292 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.640305 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.742574 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.742603 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.742611 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.742633 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.742643 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.845970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.846007 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.846018 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.846036 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.846048 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.948455 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.948512 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.948521 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.948537 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:52 crc kubenswrapper[4716]: I1209 15:09:52.948552 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:52Z","lastTransitionTime":"2025-12-09T15:09:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.051212 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.051253 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.051264 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.051280 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.051293 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.154414 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.154446 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.154456 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.154471 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.154483 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.213984 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.214090 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:53 crc kubenswrapper[4716]: E1209 15:09:53.214208 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:53 crc kubenswrapper[4716]: E1209 15:09:53.214330 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.227924 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.244474 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.264993 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.265032 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.265042 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.265060 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.265073 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.267189 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.282664 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.299101 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.313682 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.328173 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.344237 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.357672 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.367775 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.367831 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.367846 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.367867 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.367882 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.377421 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:48Z\\\",\\\"message\\\":\\\"ng zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1209 15:09:48.032485 6781 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:48.032503 6781 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI1209 15:09:48.032508 6781 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF1209 15:09:48.032185 6781 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error 
occurred:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.399275 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2a2d930-856c-4375-8e68-51ecaa63699b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b730df9d371dd6777ff1ba46c584e9cf3b1a99d65d0a1880413036962d7db099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://db112dc4ac916254c8ca33290be6ad4d91b85d7c9e17872e67916df15ae25428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8396257b714afac16cd906de4f119eb5875484a204f3071303336dfc00106118\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ac6946b598c9186358ce80d7dfe09dddc7be09
5a9037265ef4d78a5d9560e1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a355c46f0a4ee9ca369b17ec4b24b554d0eae49d84a5ce7ce318a92e37d46cd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d7a5ce1b61692a21155afd4c4eb12d16829994e3163be6b3ba0989d5336083\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00d7a5ce1b61692a21155afd4c4eb12d16829994e3163be6b3ba0989d5336083\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b62285bb69b8af9786731a0ec7b5a2bf83adb8fc922f35db071731ab1f23ab18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b62285bb69b8af9786731a0ec7b5a2bf83adb8fc922f35db071731ab1f23ab18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4cba2091f0b3d2bfce763b41a854b307ff86a2abf3ebf56dcbadce37ee149f5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4cba2091f0b3d2bfce763b41a854b307ff86a2abf3ebf56dcbadce37ee149f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.411967 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.423161 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.434461 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.446551 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.457657 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.469298 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.470095 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.470140 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.470153 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.470167 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.470175 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.483186 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220
d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:53Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.572734 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.573089 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.573191 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.573284 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.573367 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.676314 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.676377 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.676397 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.676426 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.676448 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.778940 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.779004 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.779021 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.779046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.779067 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.881329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.881370 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.881379 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.881394 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.881404 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.984200 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.984244 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.984254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.984271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:53 crc kubenswrapper[4716]: I1209 15:09:53.984280 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:53Z","lastTransitionTime":"2025-12-09T15:09:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.086381 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.086737 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.086872 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.086979 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.087045 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.190129 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.190180 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.190195 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.190217 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.190233 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.213453 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:54 crc kubenswrapper[4716]: E1209 15:09:54.213596 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.213951 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:54 crc kubenswrapper[4716]: E1209 15:09:54.214295 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.293228 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.293543 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.293647 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.293757 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.293842 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.396041 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.396077 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.396089 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.396107 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.396700 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.499995 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.500061 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.500072 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.500090 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.500100 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.602898 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.602939 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.602949 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.602973 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.602984 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.705727 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.705814 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.705849 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.705875 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.705893 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.808816 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.808862 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.808876 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.808893 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.808906 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.911824 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.911875 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.911887 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.911906 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:54 crc kubenswrapper[4716]: I1209 15:09:54.911918 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:54Z","lastTransitionTime":"2025-12-09T15:09:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.014515 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.014554 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.014565 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.014579 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.014588 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.092236 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.092426 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.092396181 +0000 UTC m=+146.247140169 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.117404 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.117813 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.117939 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.118042 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.118142 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.193588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.194010 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.194134 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.193903 4716 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.194260 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194235 4716 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194273 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194489 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194513 4716 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194363 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.194335896 +0000 UTC m=+146.349079914 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194584 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.194552032 +0000 UTC m=+146.349296180 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.194614 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.194603953 +0000 UTC m=+146.349347931 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.195002 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.195059 4716 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.195074 4716 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.195158 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.195132708 +0000 UTC m=+146.349876866 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.213837 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.213924 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.214066 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.214208 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.219852 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.219884 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.219892 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.219906 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.219915 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.322445 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.322491 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.322500 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.322516 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.322529 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.424992 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.425047 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.425056 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.425072 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.425081 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.527775 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.527843 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.527867 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.527898 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.527924 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.630367 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.630406 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.630417 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.630437 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.630449 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.682053 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.682096 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.682107 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.682124 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.682135 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.695479 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.700745 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.700788 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.700800 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.700818 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.700829 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.715368 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.720120 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.720167 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.720211 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.720233 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.720248 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.735493 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.740200 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.740246 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.740267 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.740304 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.740326 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.791873 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.795947 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.796140 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.796251 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.796357 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.796451 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.809005 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"7d76960a-8b61-4af0-9ff6-3ba0f6120862\\\",\\\"systemUUID\\\":\\\"9da27605-fb2f-423a-b7bb-978a678a6bed\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:09:55Z is after 2025-08-24T17:21:41Z" Dec 09 15:09:55 crc kubenswrapper[4716]: E1209 15:09:55.809185 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.810721 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.810828 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.810899 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.810970 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.811028 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.914263 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.914306 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.914318 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.914335 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:55 crc kubenswrapper[4716]: I1209 15:09:55.914347 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:55Z","lastTransitionTime":"2025-12-09T15:09:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.017678 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.017718 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.017729 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.017748 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.017760 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.120141 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.120173 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.120184 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.120200 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.120209 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.213114 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.213131 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:56 crc kubenswrapper[4716]: E1209 15:09:56.213278 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:56 crc kubenswrapper[4716]: E1209 15:09:56.213360 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.223296 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.223346 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.223362 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.223381 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.223396 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.326398 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.326447 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.326459 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.326478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.326490 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.429682 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.429725 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.429735 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.429751 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.429770 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.532532 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.532588 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.532601 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.532638 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.532652 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.634938 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.634980 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.635002 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.635024 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.635064 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.737582 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.737855 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.737918 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.738040 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.738120 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.842170 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.842221 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.842232 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.842249 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.842259 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.945444 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.945813 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.945927 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.946008 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:56 crc kubenswrapper[4716]: I1209 15:09:56.946066 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:56Z","lastTransitionTime":"2025-12-09T15:09:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.048652 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.048696 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.048708 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.048724 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.048732 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.151727 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.151776 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.151791 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.151809 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.151820 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.213465 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.213543 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:57 crc kubenswrapper[4716]: E1209 15:09:57.213592 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:57 crc kubenswrapper[4716]: E1209 15:09:57.213929 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.254311 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.254350 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.254359 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.254373 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.254383 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.357379 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.357438 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.357453 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.357476 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.357492 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.460669 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.460722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.460735 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.460755 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.460767 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.563271 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.563306 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.563315 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.563329 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.563337 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.665777 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.665860 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.665876 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.665900 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.665917 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.769003 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.769052 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.769067 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.769086 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.769100 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.872155 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.872197 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.872213 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.872230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.872244 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.974870 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.974932 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.974947 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.974964 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:57 crc kubenswrapper[4716]: I1209 15:09:57.974975 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:57Z","lastTransitionTime":"2025-12-09T15:09:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.077237 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.077279 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.077291 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.077313 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.077329 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.179855 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.179927 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.179954 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.179973 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.179983 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.213493 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:09:58 crc kubenswrapper[4716]: E1209 15:09:58.213602 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.213497 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:09:58 crc kubenswrapper[4716]: E1209 15:09:58.213889 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.226382 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.283185 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.283239 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.283257 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.283278 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.283290 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.385885 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.385917 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.385925 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.385939 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.385948 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.487774 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.487823 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.487840 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.487863 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.487880 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.590236 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.590292 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.590304 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.590326 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.590337 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.692446 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.692513 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.692525 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.692543 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.692557 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.794836 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.794883 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.794895 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.794911 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.794924 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.898212 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.898574 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.898663 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.898751 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:58 crc kubenswrapper[4716]: I1209 15:09:58.898816 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:58Z","lastTransitionTime":"2025-12-09T15:09:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.002801 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.002865 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.002880 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.002901 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.002913 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.105707 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.105756 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.105771 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.105788 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.105796 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.208863 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.208896 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.208907 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.208921 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.208929 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.213386 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.213404 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:09:59 crc kubenswrapper[4716]: E1209 15:09:59.213583 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:09:59 crc kubenswrapper[4716]: E1209 15:09:59.213802 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.312002 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.312057 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.312075 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.312099 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.312117 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.414913 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.415029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.415272 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.415302 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.415508 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.518992 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.519088 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.519111 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.519601 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.519657 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.622106 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.622165 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.622174 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.622190 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.622199 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.724510 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.724553 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.724562 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.724579 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.724588 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.827596 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.827670 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.827686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.827704 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.827714 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.930152 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.930206 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.930222 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.930244 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:09:59 crc kubenswrapper[4716]: I1209 15:09:59.930256 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:09:59Z","lastTransitionTime":"2025-12-09T15:09:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.032201 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.032254 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.032263 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.032280 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.032290 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.135491 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.135562 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.135578 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.135599 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.135612 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.213571 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.213727 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:00 crc kubenswrapper[4716]: E1209 15:10:00.213820 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:00 crc kubenswrapper[4716]: E1209 15:10:00.213927 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.237796 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.237944 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.237958 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.237974 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.237984 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.341285 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.341345 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.341360 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.341380 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.341391 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.444505 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.444591 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.444608 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.444669 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.444690 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.547255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.547307 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.547324 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.547346 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.547361 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.650240 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.650290 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.650299 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.650318 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.650327 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.752429 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.752482 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.752493 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.752511 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.752524 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.855149 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.855219 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.855238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.855262 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.855282 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.958421 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.958469 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.958481 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.958496 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:00 crc kubenswrapper[4716]: I1209 15:10:00.958512 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:00Z","lastTransitionTime":"2025-12-09T15:10:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.062561 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.062616 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.062660 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.062682 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.062698 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.166550 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.166659 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.166671 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.166688 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.166704 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.213553 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.213663 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:01 crc kubenswrapper[4716]: E1209 15:10:01.213738 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:01 crc kubenswrapper[4716]: E1209 15:10:01.213821 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.269818 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.269916 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.269941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.270155 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.270176 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.372868 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.372909 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.372921 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.372937 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.372954 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.475386 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.475451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.475465 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.475489 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.475505 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.578399 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.578443 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.578454 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.578468 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.578477 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.681469 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.681548 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.681569 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.681586 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.681595 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.784255 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.784345 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.784364 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.784396 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.784415 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.887723 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.887782 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.887791 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.887811 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.887823 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.990991 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.991438 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.991455 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.991478 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:01 crc kubenswrapper[4716]: I1209 15:10:01.991493 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:01Z","lastTransitionTime":"2025-12-09T15:10:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.093696 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.093771 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.093782 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.093798 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.093808 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.196367 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.196412 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.196422 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.196439 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.196452 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.213031 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.213116 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:02 crc kubenswrapper[4716]: E1209 15:10:02.213717 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:02 crc kubenswrapper[4716]: E1209 15:10:02.213968 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.214275 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:10:02 crc kubenswrapper[4716]: E1209 15:10:02.214552 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.299841 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.299878 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.299888 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.299901 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.299910 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.403041 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.403105 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.403122 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.403147 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.403164 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.505491 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.505529 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.505538 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.505556 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.505565 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.608359 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.608427 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.608439 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.608458 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.608475 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.712046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.712118 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.712133 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.712162 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.712181 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.815062 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.815121 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.815131 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.815149 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.815166 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.917516 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.917583 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.917607 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.917666 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:02 crc kubenswrapper[4716]: I1209 15:10:02.917692 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:02Z","lastTransitionTime":"2025-12-09T15:10:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.020491 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.020539 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.020551 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.020569 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.020579 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.123503 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.123551 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.123562 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.123580 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.123590 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.212606 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:03 crc kubenswrapper[4716]: E1209 15:10:03.212813 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.213064 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:03 crc kubenswrapper[4716]: E1209 15:10:03.213152 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.227067 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.227113 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.227125 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.227143 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.227156 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.228272 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b780caa1a0bb6cc5ab88f12e58b49cee3d1af620084dd6f5761852ded17da2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8641bbedb4ac50aa51932e028d9a7cf029d402e4c631170bb3d061357eec6601\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.243768 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f66b28-96d7-4c4f-b567-7f0917812e0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://05b0adb7ab9ef293fc4852b947c3c00832260edfb0472d0477e3c529cef5a814\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState
\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f26a622924a650f69d85073740b7c57cdd5623dc82bb548d9427bf0510a5b83b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e9f884b5c79ae7dc4d07d58f6552b9820cb47af7c106a2686e335dff201e7e4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73572fca101a4dc488e6b1b952fac2edaa08fb84f34906c4a13f21814e49fffc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2846fe2d5bae562626464d442e1a2ff8ebe5d948d9db42b1b72b221ff63b485f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62bb52b4548ba8bf8c7bf3b3975f047e2c3c47d272d8e269b80139d700d1e3c3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748832180a4f146d3a76e2f8a38f3a1b369962689edd9979ec62ac64057d1576\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:09:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt
/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bjm6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qrqxm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.255417 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wxrl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-gpl2n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.272192 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e7ed35d3-6b08-4863-a641-e63313d65d8a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"file observer\\\\nW1209 15:08:50.650736 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1209 15:08:50.650883 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1209 15:08:50.653646 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-878861850/tls.crt::/tmp/serving-cert-878861850/tls.key\\\\\\\"\\\\nI1209 15:08:51.078924 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1209 15:08:51.081464 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1209 15:08:51.081487 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1209 15:08:51.081507 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1209 15:08:51.081513 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1209 15:08:51.086415 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1209 15:08:51.086438 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1209 15:08:51.086457 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086465 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1209 15:08:51.086474 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1209 15:08:51.086478 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1209 15:08:51.086481 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1209 15:08:51.086485 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1209 15:08:51.088638 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.287486 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8142ea19-bdab-43d4-b6ec-0eb2dea21b60\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51c336339c165a67392862787d5ffdab4fba030a838bcb28cecb6ef0a1b1633a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4977d7373e96d2e9d86ee20a1dadffb84228e1a466bce24e5b5af6c1c6ec93b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4704af0bc981b22b47ed768b34abe27a70ec9445a3b86cbe81e44115c1e6db55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8fe87f75d755671ee24326254af036cce1ac0ef201535b3eb56fde3c3dfea65a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.304786 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://978c8a7551b6eeece1912c789295e23a823b2856d363cfbf44351ba0fbfc8db9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 
2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.320262 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.329595 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.329670 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.329685 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.329705 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.329736 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.333015 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a919bdc5-9d91-4ae1-b3c5-53948999a032\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a495c11fb4691d37c344f26e79e7f3e65eba2b6869d139b2477d81c79013a8f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c1f89c954b2acee10bb93e31a85e94d9b8e8af431d1351c2dc54d69535c33b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c1f89c954b2acee10bb93e31a85e94d9b8e8af431d1351c2dc54d69535c33b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.346504 4716 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.358616 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8clts" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"df5b4a13-8990-44d5-8b66-a3b696c9774f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://11c3246d292d0a1c1ca6f46baf71f93f06b00755a208e4e6fed6cc3653408589\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dc6th\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8clts\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.393263 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2a2d930-856c-4375-8e68-51ecaa63699b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b730df9d371dd6777ff1ba46c584e9cf3b1a99d65d0a1880413036962d7db099\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://db112dc4ac916254c8ca33290be6ad4d91b85d7c9e17872e67916df15ae25428\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8396257b714afac16cd906de4f119eb5875484a204f3071303336dfc00106118\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ac6946b598c9186358ce80d7dfe09dddc7be09
5a9037265ef4d78a5d9560e1f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a355c46f0a4ee9ca369b17ec4b24b554d0eae49d84a5ce7ce318a92e37d46cd0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://00d7a5ce1b61692a21155afd4c4eb12d16829994e3163be6b3ba0989d5336083\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://00d7a5ce1b61692a21155afd4c4eb12d16829994e3163be6b3ba0989d5336083\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b62285bb69b8af9786731a0ec7b5a2bf83adb8fc922f35db071731ab1f23ab18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b62285bb69b8af9786731a0ec7b5a2bf83adb8fc922f35db071731ab1f23ab18\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://d4cba2091f0b3d2bfce763b41a854b307ff86a2abf3ebf56dcbadce37ee149f5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d4cba2091f0b3d2bfce763b41a854b307ff86a2abf3ebf56dcbadce37ee149f5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.433722 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.434010 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.434143 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.434483 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.434650 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.440025 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3d953045-e94a-4e04-b78e-bc20b3a8c36c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:48Z\\\",\\\"message\\\":\\\"ng zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1209 15:09:48.032485 6781 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1209 15:09:48.032503 6781 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nI1209 15:09:48.032508 6781 default_network_controller.go:776] Recording success event on pod openshift-network-operator/network-operator-58b4c7f79c-55gtf\\\\nF1209 15:09:48.032185 6781 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:09:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed 
container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-54c5g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hcdn4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.454898 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rqz4n" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"38b4e174-ba72-4a0f-9eed-f2ce970c0afc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-09T15:09:42Z\\\",\\\"message\\\":\\\"2025-12-09T15:08:57+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a\\\\n2025-12-09T15:08:57+00:00 [cnibincopy] Successfully moved files in 
/host/opt/cni/bin/upgrade_d33c09ce-c19a-48ac-9883-f562acb4fa1a to /host/opt/cni/bin/\\\\n2025-12-09T15:08:57Z [verbose] multus-daemon started\\\\n2025-12-09T15:08:57Z [verbose] Readiness Indicator file check\\\\n2025-12-09T15:09:42Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-plvv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rqz4n\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.469221 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef5255ac80d96695612cdac86e63f82688271183d2daa61af9122a1bdd801089\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-km64r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-rdkb2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.480493 4716 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff607c0e-2a04-4599-b107-8ccbfed1d376\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:09:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fada1f30125149bddf6047b764fcdbbecce679857a66b8f2eaa7c00d84a5b9d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c9ead9bb35730e5bec0cd95c265e4918a0dc3dd1dd0330a27316de4f3901108\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:09:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s8vnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:09:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7wtn7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.492345 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b369173f-e61f-4181-b8f8-61f660dd5e80\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b0f6d691bf82cfd42b332b45993bac1e905497621b1b63bcb563e0c063740625\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fbdd29e81ebf4d501afc2dfeae6fb4fcf3b26aae05a714980315ff018b4b7c1a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://30ad574eaf85bbbc9ecafb46c6712d306a4a3a74c910b901137975f4f3aaf43b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:33Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.505379 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:51Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.518281 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7a29c592594652db8d84665b4f95ea14c4bc126de0d508fd9efb6be09d48976c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.528676 4716 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-q8f5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"92bbf960-39e4-4521-9ba9-c66d302ceb3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-09T15:08:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2496e752c40e1c175f4ea3321eef81439ae0cd8b988efddf694e31b517d47d9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-09T15:08:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fr45r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-09T15:08:55Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-q8f5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-09T15:10:03Z is after 2025-08-24T17:21:41Z" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.537421 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.537468 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.537479 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.537495 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.537505 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.639573 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.639926 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.640046 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.640352 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.640569 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.744097 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.744160 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.744172 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.744190 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.744200 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.846490 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.846539 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.846550 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.846569 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.846581 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.949565 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.949950 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.950108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.950263 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:03 crc kubenswrapper[4716]: I1209 15:10:03.950348 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:03Z","lastTransitionTime":"2025-12-09T15:10:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.053279 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.053333 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.053350 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.053373 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.053387 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.156217 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.156278 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.156288 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.156324 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.156336 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.213531 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.213689 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:04 crc kubenswrapper[4716]: E1209 15:10:04.213901 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:04 crc kubenswrapper[4716]: E1209 15:10:04.214031 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.259347 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.259686 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.259787 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.259872 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.259938 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.363366 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.363451 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.363474 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.363503 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.363527 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.467072 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.467609 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.467981 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.468324 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.468489 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.572157 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.572238 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.572265 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.572297 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.572324 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.675033 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.675093 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.675112 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.675139 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.675157 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.777701 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.777759 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.777776 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.777797 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.777812 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.880264 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.880305 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.880313 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.880327 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.880337 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.983261 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.983312 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.983324 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.983345 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:04 crc kubenswrapper[4716]: I1209 15:10:04.983357 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:04Z","lastTransitionTime":"2025-12-09T15:10:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.086317 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.086388 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.086398 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.086411 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.086419 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.189166 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.189205 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.189214 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.189228 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.189237 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.213135 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.213181 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:05 crc kubenswrapper[4716]: E1209 15:10:05.213261 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:05 crc kubenswrapper[4716]: E1209 15:10:05.213573 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.292170 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.292217 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.292230 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.292247 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.292259 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.395131 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.395184 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.395199 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.395217 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.395228 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.497959 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.498006 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.498015 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.498031 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.498042 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.601108 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.601162 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.601177 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.601195 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.601207 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.703688 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.703734 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.703748 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.703774 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.703784 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.807029 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.807102 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.807114 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.807132 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.807143 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.910321 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.910395 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.910412 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.910435 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:05 crc kubenswrapper[4716]: I1209 15:10:05.910448 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:05Z","lastTransitionTime":"2025-12-09T15:10:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.000543 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.000673 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.000696 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.000725 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.000745 4716 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-09T15:10:06Z","lastTransitionTime":"2025-12-09T15:10:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.058734 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89"] Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.059211 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.061567 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.062118 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.062184 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.062218 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.104857 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=75.10483309 podStartE2EDuration="1m15.10483309s" podCreationTimestamp="2025-12-09 15:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.104539442 +0000 UTC m=+93.259283430" watchObservedRunningTime="2025-12-09 15:10:06.10483309 +0000 UTC m=+93.259577078" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.118997 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ea3457ac-3b63-4647-9ead-f31bdd2c7027-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.119045 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea3457ac-3b63-4647-9ead-f31bdd2c7027-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.119095 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea3457ac-3b63-4647-9ead-f31bdd2c7027-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.119244 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ea3457ac-3b63-4647-9ead-f31bdd2c7027-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.119401 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ea3457ac-3b63-4647-9ead-f31bdd2c7027-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.120777 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=44.120766516 podStartE2EDuration="44.120766516s" podCreationTimestamp="2025-12-09 15:09:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.120135708 +0000 UTC m=+93.274879696" watchObservedRunningTime="2025-12-09 15:10:06.120766516 +0000 UTC m=+93.275510504" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.183378 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-qrqxm" podStartSLOduration=70.183339859 podStartE2EDuration="1m10.183339859s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.18301447 +0000 UTC m=+93.337758478" watchObservedRunningTime="2025-12-09 15:10:06.183339859 +0000 UTC m=+93.338083847" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.193983 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=8.193961776 podStartE2EDuration="8.193961776s" podCreationTimestamp="2025-12-09 15:09:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.193459282 +0000 UTC m=+93.348203270" watchObservedRunningTime="2025-12-09 15:10:06.193961776 +0000 UTC m=+93.348705774" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.213191 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.213260 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:06 crc kubenswrapper[4716]: E1209 15:10:06.213331 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:06 crc kubenswrapper[4716]: E1209 15:10:06.213432 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.217587 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-8clts" podStartSLOduration=71.217574298 podStartE2EDuration="1m11.217574298s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.216515528 +0000 UTC m=+93.371259526" watchObservedRunningTime="2025-12-09 15:10:06.217574298 +0000 UTC m=+93.372318286" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.220230 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ea3457ac-3b63-4647-9ead-f31bdd2c7027-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.220392 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ea3457ac-3b63-4647-9ead-f31bdd2c7027-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.220477 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/ea3457ac-3b63-4647-9ead-f31bdd2c7027-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.220532 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea3457ac-3b63-4647-9ead-f31bdd2c7027-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.220568 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea3457ac-3b63-4647-9ead-f31bdd2c7027-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.221039 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ea3457ac-3b63-4647-9ead-f31bdd2c7027-service-ca\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.221515 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ea3457ac-3b63-4647-9ead-f31bdd2c7027-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: 
\"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.221601 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/ea3457ac-3b63-4647-9ead-f31bdd2c7027-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.230596 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea3457ac-3b63-4647-9ead-f31bdd2c7027-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.236495 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ea3457ac-3b63-4647-9ead-f31bdd2c7027-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-k7f89\" (UID: \"ea3457ac-3b63-4647-9ead-f31bdd2c7027\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.265781 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=15.265759378 podStartE2EDuration="15.265759378s" podCreationTimestamp="2025-12-09 15:09:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.240786658 +0000 UTC m=+93.395530646" watchObservedRunningTime="2025-12-09 15:10:06.265759378 +0000 UTC m=+93.420503366" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.287793 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7wtn7" podStartSLOduration=70.287778284 podStartE2EDuration="1m10.287778284s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.28763359 +0000 UTC m=+93.442377568" watchObservedRunningTime="2025-12-09 15:10:06.287778284 +0000 UTC m=+93.442522272" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.301512 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=76.301492988 podStartE2EDuration="1m16.301492988s" podCreationTimestamp="2025-12-09 15:08:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.300942803 +0000 UTC m=+93.455686791" watchObservedRunningTime="2025-12-09 15:10:06.301492988 +0000 UTC m=+93.456236976" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.338241 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-q8f5v" podStartSLOduration=71.338224147 podStartE2EDuration="1m11.338224147s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-12-09 15:10:06.337422725 +0000 UTC m=+93.492166713" watchObservedRunningTime="2025-12-09 15:10:06.338224147 +0000 UTC m=+93.492968135" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.354020 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rqz4n" podStartSLOduration=71.354003078 podStartE2EDuration="1m11.354003078s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.353667819 +0000 UTC m=+93.508411807" watchObservedRunningTime="2025-12-09 15:10:06.354003078 +0000 UTC m=+93.508747066" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.372821 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.715695 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" event={"ID":"ea3457ac-3b63-4647-9ead-f31bdd2c7027","Type":"ContainerStarted","Data":"7c489a43d06932924c6584e3f460e3869d19a72f456a1268dd06dadf98c81bdf"} Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.715739 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" event={"ID":"ea3457ac-3b63-4647-9ead-f31bdd2c7027","Type":"ContainerStarted","Data":"6e9e8a1113f53d7a00d8d50049913d789c2f8c68f4bb774b9bc0f898af678701"} Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.731421 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-k7f89" podStartSLOduration=70.731386279 podStartE2EDuration="1m10.731386279s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.731034309 +0000 UTC m=+93.885778297" watchObservedRunningTime="2025-12-09 15:10:06.731386279 +0000 UTC m=+93.886130317" Dec 09 15:10:06 crc kubenswrapper[4716]: I1209 15:10:06.732082 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podStartSLOduration=71.732068118 podStartE2EDuration="1m11.732068118s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:06.3658486 +0000 UTC m=+93.520592588" watchObservedRunningTime="2025-12-09 15:10:06.732068118 +0000 UTC m=+93.886812156" Dec 09 15:10:07 crc kubenswrapper[4716]: I1209 15:10:07.213002 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:07 crc kubenswrapper[4716]: E1209 15:10:07.213250 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:07 crc kubenswrapper[4716]: I1209 15:10:07.213345 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:07 crc kubenswrapper[4716]: E1209 15:10:07.213492 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:08 crc kubenswrapper[4716]: I1209 15:10:08.212507 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:08 crc kubenswrapper[4716]: E1209 15:10:08.212929 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:08 crc kubenswrapper[4716]: I1209 15:10:08.213190 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:08 crc kubenswrapper[4716]: E1209 15:10:08.213597 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:09 crc kubenswrapper[4716]: I1209 15:10:09.213889 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:09 crc kubenswrapper[4716]: I1209 15:10:09.214014 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:09 crc kubenswrapper[4716]: E1209 15:10:09.214380 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:09 crc kubenswrapper[4716]: E1209 15:10:09.214542 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:10 crc kubenswrapper[4716]: I1209 15:10:10.213565 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:10 crc kubenswrapper[4716]: I1209 15:10:10.213694 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:10 crc kubenswrapper[4716]: E1209 15:10:10.214035 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:10 crc kubenswrapper[4716]: E1209 15:10:10.214245 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:11 crc kubenswrapper[4716]: I1209 15:10:11.213405 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:11 crc kubenswrapper[4716]: E1209 15:10:11.214355 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:11 crc kubenswrapper[4716]: I1209 15:10:11.214820 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:11 crc kubenswrapper[4716]: E1209 15:10:11.215024 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:12 crc kubenswrapper[4716]: I1209 15:10:12.213385 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:12 crc kubenswrapper[4716]: I1209 15:10:12.213560 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:12 crc kubenswrapper[4716]: E1209 15:10:12.213785 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:12 crc kubenswrapper[4716]: E1209 15:10:12.213942 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:13 crc kubenswrapper[4716]: I1209 15:10:13.213087 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:13 crc kubenswrapper[4716]: I1209 15:10:13.213121 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:13 crc kubenswrapper[4716]: E1209 15:10:13.214159 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:13 crc kubenswrapper[4716]: E1209 15:10:13.214316 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:14 crc kubenswrapper[4716]: I1209 15:10:14.212887 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:14 crc kubenswrapper[4716]: I1209 15:10:14.212895 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:14 crc kubenswrapper[4716]: E1209 15:10:14.213020 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:14 crc kubenswrapper[4716]: E1209 15:10:14.213110 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:14 crc kubenswrapper[4716]: I1209 15:10:14.715557 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:14 crc kubenswrapper[4716]: E1209 15:10:14.715760 4716 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:10:14 crc kubenswrapper[4716]: E1209 15:10:14.715872 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs podName:f0e1f1bc-46bd-4293-a9b9-d57c1f83a613 nodeName:}" failed. No retries permitted until 2025-12-09 15:11:18.715850534 +0000 UTC m=+165.870594522 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs") pod "network-metrics-daemon-gpl2n" (UID: "f0e1f1bc-46bd-4293-a9b9-d57c1f83a613") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 09 15:10:15 crc kubenswrapper[4716]: I1209 15:10:15.213783 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:15 crc kubenswrapper[4716]: I1209 15:10:15.213846 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:15 crc kubenswrapper[4716]: I1209 15:10:15.214760 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:10:15 crc kubenswrapper[4716]: E1209 15:10:15.215099 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:10:15 crc kubenswrapper[4716]: E1209 15:10:15.215371 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:15 crc kubenswrapper[4716]: E1209 15:10:15.215482 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:16 crc kubenswrapper[4716]: I1209 15:10:16.212956 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:16 crc kubenswrapper[4716]: I1209 15:10:16.212985 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:16 crc kubenswrapper[4716]: E1209 15:10:16.213268 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:16 crc kubenswrapper[4716]: E1209 15:10:16.213387 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:17 crc kubenswrapper[4716]: I1209 15:10:17.213206 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:17 crc kubenswrapper[4716]: I1209 15:10:17.213361 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:17 crc kubenswrapper[4716]: E1209 15:10:17.213536 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:17 crc kubenswrapper[4716]: E1209 15:10:17.214344 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:18 crc kubenswrapper[4716]: I1209 15:10:18.213148 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:18 crc kubenswrapper[4716]: I1209 15:10:18.213270 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:18 crc kubenswrapper[4716]: E1209 15:10:18.213324 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:18 crc kubenswrapper[4716]: E1209 15:10:18.213398 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:19 crc kubenswrapper[4716]: I1209 15:10:19.213690 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:19 crc kubenswrapper[4716]: I1209 15:10:19.213812 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:19 crc kubenswrapper[4716]: E1209 15:10:19.213841 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:19 crc kubenswrapper[4716]: E1209 15:10:19.213967 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:20 crc kubenswrapper[4716]: I1209 15:10:20.213261 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:20 crc kubenswrapper[4716]: I1209 15:10:20.213325 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:20 crc kubenswrapper[4716]: E1209 15:10:20.213485 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:20 crc kubenswrapper[4716]: E1209 15:10:20.213708 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:21 crc kubenswrapper[4716]: I1209 15:10:21.213577 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:21 crc kubenswrapper[4716]: I1209 15:10:21.213611 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:21 crc kubenswrapper[4716]: E1209 15:10:21.213998 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:21 crc kubenswrapper[4716]: E1209 15:10:21.214115 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:22 crc kubenswrapper[4716]: I1209 15:10:22.213413 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:22 crc kubenswrapper[4716]: I1209 15:10:22.213450 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:22 crc kubenswrapper[4716]: E1209 15:10:22.213587 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:22 crc kubenswrapper[4716]: E1209 15:10:22.213783 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:23 crc kubenswrapper[4716]: I1209 15:10:23.213139 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:10:23 crc kubenswrapper[4716]: I1209 15:10:23.213220 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:23 crc kubenswrapper[4716]: E1209 15:10:23.214409 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:10:23 crc kubenswrapper[4716]: E1209 15:10:23.214684 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:10:24 crc kubenswrapper[4716]: I1209 15:10:24.212923 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:24 crc kubenswrapper[4716]: E1209 15:10:24.213079 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:10:24 crc kubenswrapper[4716]: I1209 15:10:24.212941 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:24 crc kubenswrapper[4716]: E1209 15:10:24.213179 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:10:25 crc kubenswrapper[4716]: I1209 15:10:25.212773 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:25 crc kubenswrapper[4716]: I1209 15:10:25.212837 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:25 crc kubenswrapper[4716]: E1209 15:10:25.212883 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:25 crc kubenswrapper[4716]: E1209 15:10:25.212978 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:26 crc kubenswrapper[4716]: I1209 15:10:26.213235 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:26 crc kubenswrapper[4716]: I1209 15:10:26.213377 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:26 crc kubenswrapper[4716]: E1209 15:10:26.213589 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:26 crc kubenswrapper[4716]: E1209 15:10:26.213849 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:27 crc kubenswrapper[4716]: I1209 15:10:27.212978 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:27 crc kubenswrapper[4716]: I1209 15:10:27.213087 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:27 crc kubenswrapper[4716]: E1209 15:10:27.213197 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:27 crc kubenswrapper[4716]: E1209 15:10:27.213911 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:27 crc kubenswrapper[4716]: I1209 15:10:27.214426 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:10:27 crc kubenswrapper[4716]: E1209 15:10:27.214761 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hcdn4_openshift-ovn-kubernetes(3d953045-e94a-4e04-b78e-bc20b3a8c36c)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.212807 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:28 crc kubenswrapper[4716]: E1209 15:10:28.212969 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.213104 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:28 crc kubenswrapper[4716]: E1209 15:10:28.213311 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.790477 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/1.log" Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.790984 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/0.log" Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.791027 4716 generic.go:334] "Generic (PLEG): container finished" podID="38b4e174-ba72-4a0f-9eed-f2ce970c0afc" containerID="a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b" exitCode=1 Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.791070 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerDied","Data":"a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b"} Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.791139 4716 scope.go:117] "RemoveContainer" containerID="b24193016058646d088f754d6947941b30459264f242e093f75f7d48ba8f31d4" Dec 09 15:10:28 crc kubenswrapper[4716]: I1209 15:10:28.791666 4716 scope.go:117] "RemoveContainer" containerID="a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b" Dec 09 15:10:28 crc kubenswrapper[4716]: E1209 15:10:28.791887 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-rqz4n_openshift-multus(38b4e174-ba72-4a0f-9eed-f2ce970c0afc)\"" pod="openshift-multus/multus-rqz4n" podUID="38b4e174-ba72-4a0f-9eed-f2ce970c0afc" Dec 09 15:10:29 crc kubenswrapper[4716]: I1209 15:10:29.212905 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:29 crc kubenswrapper[4716]: I1209 15:10:29.212967 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:29 crc kubenswrapper[4716]: E1209 15:10:29.213098 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:29 crc kubenswrapper[4716]: E1209 15:10:29.213219 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:29 crc kubenswrapper[4716]: I1209 15:10:29.795037 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/1.log" Dec 09 15:10:30 crc kubenswrapper[4716]: I1209 15:10:30.213043 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:10:30 crc kubenswrapper[4716]: I1209 15:10:30.213182 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:30 crc kubenswrapper[4716]: E1209 15:10:30.213316 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:10:30 crc kubenswrapper[4716]: E1209 15:10:30.213414 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:10:31 crc kubenswrapper[4716]: I1209 15:10:31.213340 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:31 crc kubenswrapper[4716]: E1209 15:10:31.213500 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:10:31 crc kubenswrapper[4716]: I1209 15:10:31.214196 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:31 crc kubenswrapper[4716]: E1209 15:10:31.214395 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:10:32 crc kubenswrapper[4716]: I1209 15:10:32.212832 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:32 crc kubenswrapper[4716]: I1209 15:10:32.212880 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:32 crc kubenswrapper[4716]: E1209 15:10:32.213050 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:32 crc kubenswrapper[4716]: E1209 15:10:32.213160 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:33 crc kubenswrapper[4716]: E1209 15:10:33.138672 4716 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 09 15:10:33 crc kubenswrapper[4716]: I1209 15:10:33.212978 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:33 crc kubenswrapper[4716]: I1209 15:10:33.213034 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:33 crc kubenswrapper[4716]: E1209 15:10:33.214331 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:33 crc kubenswrapper[4716]: E1209 15:10:33.214557 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:33 crc kubenswrapper[4716]: E1209 15:10:33.304417 4716 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:10:34 crc kubenswrapper[4716]: I1209 15:10:34.213051 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:34 crc kubenswrapper[4716]: I1209 15:10:34.213197 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:34 crc kubenswrapper[4716]: E1209 15:10:34.213940 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:34 crc kubenswrapper[4716]: E1209 15:10:34.214135 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:35 crc kubenswrapper[4716]: I1209 15:10:35.213168 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 09 15:10:35 crc kubenswrapper[4716]: I1209 15:10:35.213180 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:10:35 crc kubenswrapper[4716]: E1209 15:10:35.213384 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 09 15:10:35 crc kubenswrapper[4716]: E1209 15:10:35.213536 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 09 15:10:36 crc kubenswrapper[4716]: I1209 15:10:36.213293 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 09 15:10:36 crc kubenswrapper[4716]: E1209 15:10:36.213452 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 09 15:10:36 crc kubenswrapper[4716]: I1209 15:10:36.213519 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:10:36 crc kubenswrapper[4716]: E1209 15:10:36.213572 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613" Dec 09 15:10:37 crc kubenswrapper[4716]: I1209 15:10:37.213780 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:10:37 crc kubenswrapper[4716]: E1209 15:10:37.213902 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:10:37 crc kubenswrapper[4716]: I1209 15:10:37.214059 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:37 crc kubenswrapper[4716]: E1209 15:10:37.214271 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:10:38 crc kubenswrapper[4716]: I1209 15:10:38.213056 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:38 crc kubenswrapper[4716]: I1209 15:10:38.213129 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:38 crc kubenswrapper[4716]: E1209 15:10:38.213213 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:10:38 crc kubenswrapper[4716]: E1209 15:10:38.213268 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:10:38 crc kubenswrapper[4716]: E1209 15:10:38.305664 4716 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.213269 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.213290 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:39 crc kubenswrapper[4716]: E1209 15:10:39.213773 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:10:39 crc kubenswrapper[4716]: E1209 15:10:39.214006 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.214141 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.830734 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/3.log"
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.833518 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerStarted","Data":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"}
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.833994 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4"
Dec 09 15:10:39 crc kubenswrapper[4716]: I1209 15:10:39.865410 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podStartSLOduration=103.865395644 podStartE2EDuration="1m43.865395644s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:39.864409186 +0000 UTC m=+127.019153174" watchObservedRunningTime="2025-12-09 15:10:39.865395644 +0000 UTC m=+127.020139632"
Dec 09 15:10:40 crc kubenswrapper[4716]: I1209 15:10:40.074818 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gpl2n"]
Dec 09 15:10:40 crc kubenswrapper[4716]: I1209 15:10:40.074923 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:40 crc kubenswrapper[4716]: E1209 15:10:40.075009 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:10:40 crc kubenswrapper[4716]: I1209 15:10:40.213016 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:40 crc kubenswrapper[4716]: I1209 15:10:40.213484 4716 scope.go:117] "RemoveContainer" containerID="a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b"
Dec 09 15:10:40 crc kubenswrapper[4716]: E1209 15:10:40.213559 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:10:41 crc kubenswrapper[4716]: I1209 15:10:41.213091 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:41 crc kubenswrapper[4716]: I1209 15:10:41.213245 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:41 crc kubenswrapper[4716]: E1209 15:10:41.213418 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:10:41 crc kubenswrapper[4716]: I1209 15:10:41.213549 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:41 crc kubenswrapper[4716]: E1209 15:10:41.213665 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:10:41 crc kubenswrapper[4716]: E1209 15:10:41.213912 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:10:41 crc kubenswrapper[4716]: I1209 15:10:41.843802 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/1.log"
Dec 09 15:10:41 crc kubenswrapper[4716]: I1209 15:10:41.843875 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerStarted","Data":"a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298"}
Dec 09 15:10:42 crc kubenswrapper[4716]: I1209 15:10:42.213787 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:42 crc kubenswrapper[4716]: E1209 15:10:42.214017 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 09 15:10:43 crc kubenswrapper[4716]: I1209 15:10:43.213632 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:43 crc kubenswrapper[4716]: I1209 15:10:43.213679 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:43 crc kubenswrapper[4716]: I1209 15:10:43.213711 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:43 crc kubenswrapper[4716]: E1209 15:10:43.216107 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 09 15:10:43 crc kubenswrapper[4716]: E1209 15:10:43.216176 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-gpl2n" podUID="f0e1f1bc-46bd-4293-a9b9-d57c1f83a613"
Dec 09 15:10:43 crc kubenswrapper[4716]: E1209 15:10:43.216227 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 09 15:10:44 crc kubenswrapper[4716]: I1209 15:10:44.213563 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:44 crc kubenswrapper[4716]: I1209 15:10:44.216804 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Dec 09 15:10:44 crc kubenswrapper[4716]: I1209 15:10:44.216886 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.212708 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.212763 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.212771 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.215714 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.218764 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.220054 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Dec 09 15:10:45 crc kubenswrapper[4716]: I1209 15:10:45.220239 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.886941 4716 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.937548 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-z4jgs"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.938416 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bs6mn"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.938766 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-w6p7n"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.939113 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.939475 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.939895 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.940190 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.941201 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.942569 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.942696 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.943362 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.944076 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bj2zv"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.944437 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.945052 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.945812 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.947215 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.947462 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.947856 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.948355 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.948559 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.948602 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.948923 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.949203 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.950681 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.950726 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951027 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951098 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951192 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951375 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951446 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951648 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951671 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951892 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951918 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951944 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.951679 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.952459 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.953789 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.953998 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.954195 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.954220 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.955836 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.955948 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.955979 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.956128 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.964760 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.965103 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.965922 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gjrqw"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.966673 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.966852 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.968473 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-ckjw6"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.970876 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.970962 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.972374 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.972722 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.972967 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.974451 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ckjw6"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.974898 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.976115 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.976550 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.986789 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.987053 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.987258 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.987275 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.987665 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.988604 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.988917 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-8qjxz"]
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.989380 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.989492 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-8qjxz"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.994765 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.994992 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Dec 09 15:10:46 crc kubenswrapper[4716]: I1209 15:10:46.999840 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.000174 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.000356 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.000500 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.000660 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.001009 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.001178 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.001320 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.001430 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.009073 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.009565 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.010060 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.010136 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.010504 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.010776 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.011139 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
*v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.011257 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.012159 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.013893 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.013904 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.014177 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.013959 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.014876 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.015051 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.015502 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.015588 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.016261 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.016721 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.017055 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.017087 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jk4jp"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.018917 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.018987 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.019125 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.019313 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.019502 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.019652 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.019834 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vd9wq"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.027265 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.028068 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ht662"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.029524 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.033524 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.038406 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-w9bdm"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.043718 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.044339 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.044776 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.044971 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.048476 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.061012 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.061080 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.066016 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.066811 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.067523 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.067969 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.068645 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.069771 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.069960 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.070152 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.072036 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.072283 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b25nc"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.072985 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.073408 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.073508 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.074084 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.074973 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.076435 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.078172 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.079277 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.079726 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-68bqx"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.079735 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080030 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080127 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080496 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080540 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080677 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080699 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080771 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080812 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080940 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.081133 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080951 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.081547 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.080988 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.081118 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.083890 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.084057 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.084687 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.086084 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.088073 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.088688 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hghbz\" (UniqueName: \"kubernetes.io/projected/646b361f-bd5c-4be0-a52c-735688c0f88a-kube-api-access-hghbz\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.089047 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091342 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/002d496a-6bac-491c-bf18-2b4a325f78ec-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091412 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091443 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-audit-policies\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091468 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-serving-cert\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091486 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091507 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091538 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-vw2dm\" (UniqueName: \"kubernetes.io/projected/d90937ae-2446-41f4-94eb-e928c5d449de-kube-api-access-vw2dm\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091553 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-etcd-client\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091570 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091586 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091606 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/277458f8-729e-4250-b4d0-db21713e4e48-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091641 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-config\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091657 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-dir\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091677 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbmfc\" (UniqueName: \"kubernetes.io/projected/a85db618-f9aa-44e6-8060-3b660c70940d-kube-api-access-rbmfc\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091702 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-h5sc4\" (UniqueName: \"kubernetes.io/projected/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-kube-api-access-h5sc4\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091721 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/326772eb-ce2f-4f9b-bee5-8265a8762f26-audit-dir\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091748 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-etcd-serving-ca\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091768 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-client-ca\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091783 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-audit\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091801 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9r42\" (UniqueName: \"kubernetes.io/projected/002d496a-6bac-491c-bf18-2b4a325f78ec-kube-api-access-g9r42\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091819 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091849 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4bm5\" (UniqueName: \"kubernetes.io/projected/277458f8-729e-4250-b4d0-db21713e4e48-kube-api-access-k4bm5\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091870 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-config\") 
pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091889 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-oauth-serving-cert\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091907 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/002d496a-6bac-491c-bf18-2b4a325f78ec-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091936 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-config\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091952 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-trusted-ca-bundle\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091970 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277458f8-729e-4250-b4d0-db21713e4e48-config\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.091997 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3924e326-c048-4d29-883f-9b2a07edfe5a-serving-cert\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092014 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-config\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092033 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bj2zv\" 
(UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092047 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90937ae-2446-41f4-94eb-e928c5d449de-serving-cert\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092063 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092076 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-config\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092090 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-client-ca\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092107 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-etcd-client\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092123 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt54k\" (UniqueName: \"kubernetes.io/projected/4acd966d-4bae-456f-bffd-9ad6533cc66d-kube-api-access-zt54k\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092141 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092158 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7cjh\" (UniqueName: \"kubernetes.io/projected/3924e326-c048-4d29-883f-9b2a07edfe5a-kube-api-access-z7cjh\") pod 
\"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092174 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvdzf\" (UniqueName: \"kubernetes.io/projected/326772eb-ce2f-4f9b-bee5-8265a8762f26-kube-api-access-rvdzf\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092191 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/646b361f-bd5c-4be0-a52c-735688c0f88a-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092206 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-647nf\" (UniqueName: \"kubernetes.io/projected/8c6090d1-1d37-4305-9cbf-3c76c3237777-kube-api-access-647nf\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092224 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snswq\" (UniqueName: \"kubernetes.io/projected/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-kube-api-access-snswq\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092238 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4acd966d-4bae-456f-bffd-9ad6533cc66d-serving-cert\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092255 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/002d496a-6bac-491c-bf18-2b4a325f78ec-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092273 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/646b361f-bd5c-4be0-a52c-735688c0f88a-config\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092290 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092309 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a85db618-f9aa-44e6-8060-3b660c70940d-config\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092324 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-image-import-ca\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092342 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092357 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a85db618-f9aa-44e6-8060-3b660c70940d-machine-approver-tls\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092373 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/277458f8-729e-4250-b4d0-db21713e4e48-images\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092387 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a85db618-f9aa-44e6-8060-3b660c70940d-auth-proxy-config\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092405 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-node-pullsecrets\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092420 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-audit-dir\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092445 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092469 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092483 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-encryption-config\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092500 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pps6\" (UniqueName: \"kubernetes.io/projected/31afed1d-4e1c-491e-b54b-a5e7e24077f1-kube-api-access-5pps6\") pod \"downloads-7954f5f757-8qjxz\" (UID: \"31afed1d-4e1c-491e-b54b-a5e7e24077f1\") " pod="openshift-console/downloads-7954f5f757-8qjxz" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092515 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-service-ca-bundle\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092531 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-encryption-config\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092547 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-oauth-config\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092564 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-serving-cert\") pod \"apiserver-76f77b778f-z4jgs\" (UID: 
\"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092581 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-service-ca\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092597 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092610 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-serving-cert\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092646 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-trusted-ca-bundle\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092662 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-policies\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.092680 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.095112 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.096017 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.096346 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.096962 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.102969 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.104342 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jj67s"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.105205 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.105830 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.106813 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.108894 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.109398 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.109477 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.110461 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.112006 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bs6mn"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.112977 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.113660 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.114657 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.115062 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.122107 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.122338 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.123174 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.127330 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.129110 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2rcsb"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.139550 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.140734 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-pvl7k"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.143113 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.143452 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.145756 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ckjw6"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.152847 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bj2zv"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.155506 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.157682 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.161992 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.164772 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.166733 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.168489 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-w6p7n"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.169838 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.171221 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.172265 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gjrqw"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.174054 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b25nc"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.175472 4716 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.176726 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-z4jgs"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.181950 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.185166 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.185478 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-pvl7k"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.186115 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vd9wq"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.187581 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.188788 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.189709 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.190739 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.191792 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-68bqx"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193330 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dww6\" (UniqueName: \"kubernetes.io/projected/54eee4e4-f460-4d64-a083-0d4b81be7561-kube-api-access-4dww6\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193372 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt54k\" (UniqueName: \"kubernetes.io/projected/4acd966d-4bae-456f-bffd-9ad6533cc66d-kube-api-access-zt54k\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193399 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-client-ca\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193466 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-etcd-client\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193538 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-config\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193569 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/646b361f-bd5c-4be0-a52c-735688c0f88a-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193594 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-647nf\" (UniqueName: \"kubernetes.io/projected/8c6090d1-1d37-4305-9cbf-3c76c3237777-kube-api-access-647nf\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193632 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193656 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7cjh\" (UniqueName: \"kubernetes.io/projected/3924e326-c048-4d29-883f-9b2a07edfe5a-kube-api-access-z7cjh\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193679 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvdzf\" (UniqueName: \"kubernetes.io/projected/326772eb-ce2f-4f9b-bee5-8265a8762f26-kube-api-access-rvdzf\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193703 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snswq\" (UniqueName: \"kubernetes.io/projected/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-kube-api-access-snswq\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193723 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4acd966d-4bae-456f-bffd-9ad6533cc66d-serving-cert\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: 
\"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193749 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a5624b6f-fed8-4b28-bc4d-8143da4a19de-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193790 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh79r\" (UniqueName: \"kubernetes.io/projected/d13b21e2-5103-4d54-ae52-1d7344e8168c-kube-api-access-gh79r\") pod \"multus-admission-controller-857f4d67dd-68bqx\" (UID: \"d13b21e2-5103-4d54-ae52-1d7344e8168c\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193818 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/54eee4e4-f460-4d64-a083-0d4b81be7561-images\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193846 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54eee4e4-f460-4d64-a083-0d4b81be7561-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193872 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/646b361f-bd5c-4be0-a52c-735688c0f88a-config\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193898 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193921 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/002d496a-6bac-491c-bf18-2b4a325f78ec-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193946 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-etcd-service-ca\") pod 
\"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.193972 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kq66\" (UniqueName: \"kubernetes.io/projected/38472de7-e7a3-4a12-8b6d-96af0d58ea1a-kube-api-access-9kq66\") pod \"migrator-59844c95c7-h9txk\" (UID: \"38472de7-e7a3-4a12-8b6d-96af0d58ea1a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194002 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194031 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a85db618-f9aa-44e6-8060-3b660c70940d-machine-approver-tls\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194056 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a85db618-f9aa-44e6-8060-3b660c70940d-config\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194082 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-image-import-ca\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194109 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194131 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54eee4e4-f460-4d64-a083-0d4b81be7561-proxy-tls\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194169 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/277458f8-729e-4250-b4d0-db21713e4e48-images\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194196 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a85db618-f9aa-44e6-8060-3b660c70940d-auth-proxy-config\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194222 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-node-pullsecrets\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194247 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-audit-dir\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194273 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd694c7-53b3-4289-956c-6919aa0536af-serving-cert\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194391 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194423 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pps6\" (UniqueName: \"kubernetes.io/projected/31afed1d-4e1c-491e-b54b-a5e7e24077f1-kube-api-access-5pps6\") pod \"downloads-7954f5f757-8qjxz\" (UID: \"31afed1d-4e1c-491e-b54b-a5e7e24077f1\") " pod="openshift-console/downloads-7954f5f757-8qjxz" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194444 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-client-ca\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.194449 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195011 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-encryption-config\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195038 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/297ecdc1-e23a-469b-a1f5-907876ecdfaa-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-hcmms\" (UID: \"297ecdc1-e23a-469b-a1f5-907876ecdfaa\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195067 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvjsk\" (UniqueName: \"kubernetes.io/projected/39725dbb-08de-484d-86c4-0c99c39f0d07-kube-api-access-gvjsk\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195091 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a51c95a-3cda-4cd7-baff-6f802d483286-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195096 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195110 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-service-ca-bundle\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195143 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-encryption-config\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195173 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60e7255c-ae49-4977-94a1-1584892429cb-serving-cert\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195202 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-oauth-config\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195228 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-serving-cert\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195283 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-etcd-ca\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195308 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5624b6f-fed8-4b28-bc4d-8143da4a19de-config\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195339 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-service-ca\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195365 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195390 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-serving-cert\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195413 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-trusted-ca-bundle\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195437 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-policies\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 
15:10:47.195464 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195492 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-config\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195522 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hghbz\" (UniqueName: \"kubernetes.io/projected/646b361f-bd5c-4be0-a52c-735688c0f88a-kube-api-access-hghbz\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195552 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/002d496a-6bac-491c-bf18-2b4a325f78ec-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195579 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195635 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a51c95a-3cda-4cd7-baff-6f802d483286-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195663 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-audit-policies\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195710 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-serving-cert\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195733 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" 
(UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195755 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195790 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw2dm\" (UniqueName: \"kubernetes.io/projected/d90937ae-2446-41f4-94eb-e928c5d449de-kube-api-access-vw2dm\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195810 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-etcd-client\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195833 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f9529ab3-6789-4dbf-a404-9e255ad0a559-metrics-tls\") pod \"dns-operator-744455d44c-jk4jp\" (UID: \"f9529ab3-6789-4dbf-a404-9e255ad0a559\") " pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195859 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195880 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195903 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rdx5\" (UniqueName: \"kubernetes.io/projected/93712aef-1f09-4361-922f-844e0f5438b6-kube-api-access-4rdx5\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195926 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/39725dbb-08de-484d-86c4-0c99c39f0d07-proxy-tls\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195946 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39725dbb-08de-484d-86c4-0c99c39f0d07-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195974 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/277458f8-729e-4250-b4d0-db21713e4e48-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195995 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-config\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196016 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-dir\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196038 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbmfc\" (UniqueName: \"kubernetes.io/projected/a85db618-f9aa-44e6-8060-3b660c70940d-kube-api-access-rbmfc\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196059 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5sc4\" (UniqueName: \"kubernetes.io/projected/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-kube-api-access-h5sc4\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196081 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/326772eb-ce2f-4f9b-bee5-8265a8762f26-audit-dir\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196102 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5624b6f-fed8-4b28-bc4d-8143da4a19de-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: 
\"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196141 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93712aef-1f09-4361-922f-844e0f5438b6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196135 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/277458f8-729e-4250-b4d0-db21713e4e48-images\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196162 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4a51c95a-3cda-4cd7-baff-6f802d483286-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196186 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d13b21e2-5103-4d54-ae52-1d7344e8168c-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-68bqx\" (UID: \"d13b21e2-5103-4d54-ae52-1d7344e8168c\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196208 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcd694c7-53b3-4289-956c-6919aa0536af-trusted-ca\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196235 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-client-ca\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196258 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-etcd-serving-ca\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196284 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj5h9\" (UniqueName: \"kubernetes.io/projected/60e7255c-ae49-4977-94a1-1584892429cb-kube-api-access-nj5h9\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196306 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bczv8\" (UniqueName: \"kubernetes.io/projected/dcd694c7-53b3-4289-956c-6919aa0536af-kube-api-access-bczv8\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196329 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9r42\" (UniqueName: \"kubernetes.io/projected/002d496a-6bac-491c-bf18-2b4a325f78ec-kube-api-access-g9r42\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196349 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-audit\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196372 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwrcq\" (UniqueName: \"kubernetes.io/projected/f9529ab3-6789-4dbf-a404-9e255ad0a559-kube-api-access-lwrcq\") pod \"dns-operator-744455d44c-jk4jp\" (UID: \"f9529ab3-6789-4dbf-a404-9e255ad0a559\") " pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196392 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd694c7-53b3-4289-956c-6919aa0536af-config\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196415 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196442 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4bm5\" (UniqueName: \"kubernetes.io/projected/277458f8-729e-4250-b4d0-db21713e4e48-kube-api-access-k4bm5\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196467 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-config\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196490 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7rgd\" (UniqueName: \"kubernetes.io/projected/297ecdc1-e23a-469b-a1f5-907876ecdfaa-kube-api-access-p7rgd\") pod \"control-plane-machine-set-operator-78cbb6b69f-hcmms\" (UID: \"297ecdc1-e23a-469b-a1f5-907876ecdfaa\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196526 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-oauth-serving-cert\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196551 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/002d496a-6bac-491c-bf18-2b4a325f78ec-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93712aef-1f09-4361-922f-844e0f5438b6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196598 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277458f8-729e-4250-b4d0-db21713e4e48-config\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196682 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-config\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196703 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.197739 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-trusted-ca-bundle\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.196710 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-trusted-ca-bundle\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198031 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198444 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198490 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3924e326-c048-4d29-883f-9b2a07edfe5a-serving-cert\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198523 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-config\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198551 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/60e7255c-ae49-4977-94a1-1584892429cb-etcd-client\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198579 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198604 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-config\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198645 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90937ae-2446-41f4-94eb-e928c5d449de-serving-cert\") pod 
\"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.198904 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-image-import-ca\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.200752 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/646b361f-bd5c-4be0-a52c-735688c0f88a-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.200815 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4acd966d-4bae-456f-bffd-9ad6533cc66d-serving-cert\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.201311 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.201414 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-encryption-config\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.201494 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/646b361f-bd5c-4be0-a52c-735688c0f88a-config\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.201812 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-encryption-config\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.202054 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-etcd-client\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.202258 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-config\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.202406 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-oauth-config\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.202800 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.202871 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-audit\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.202910 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/a85db618-f9aa-44e6-8060-3b660c70940d-machine-approver-tls\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.203220 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/326772eb-ce2f-4f9b-bee5-8265a8762f26-audit-policies\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.203307 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.203501 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a85db618-f9aa-44e6-8060-3b660c70940d-config\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.203613 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-oauth-serving-cert\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.204567 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-trusted-ca-bundle\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.204958 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-config\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.205094 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-serving-cert\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.205679 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-service-ca\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.205946 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-policies\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.206130 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/002d496a-6bac-491c-bf18-2b4a325f78ec-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.206177 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.206780 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-serving-cert\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.206805 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/277458f8-729e-4250-b4d0-db21713e4e48-config\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.207356 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/002d496a-6bac-491c-bf18-2b4a325f78ec-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: 
\"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.195709 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-service-ca-bundle\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.206815 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.207579 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a85db618-f9aa-44e6-8060-3b660c70940d-auth-proxy-config\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.207784 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-node-pullsecrets\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.207953 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/277458f8-729e-4250-b4d0-db21713e4e48-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.208247 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.208506 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-audit-dir\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.208574 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-dir\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.208605 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3924e326-c048-4d29-883f-9b2a07edfe5a-serving-cert\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.209086 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-config\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.209325 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.209585 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-etcd-serving-ca\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.209837 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-config\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.210046 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.210575 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.210059 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-config\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.209956 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/326772eb-ce2f-4f9b-bee5-8265a8762f26-audit-dir\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.210775 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-client-ca\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.211246 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jk4jp"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.211863 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.213654 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3924e326-c048-4d29-883f-9b2a07edfe5a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.213956 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.214179 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90937ae-2446-41f4-94eb-e928c5d449de-serving-cert\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.217646 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.218717 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-etcd-client\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.219394 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.221276 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-8qjxz"] 
Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.221313 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.221349 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ht662"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.222853 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.223098 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.223206 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.224134 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/326772eb-ce2f-4f9b-bee5-8265a8762f26-serving-cert\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.228677 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nlkfk"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.229913 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-xcs2n"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.230279 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.230350 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.230439 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-xcs2n" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.230486 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.236847 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2rcsb"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.236901 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.236912 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.238946 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jj67s"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.240399 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.240772 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nlkfk"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.242921 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.290348 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-xcs2n"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.290480 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.293818 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.294380 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.297486 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-gjq4j"] Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.298430 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299273 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd694c7-53b3-4289-956c-6919aa0536af-config\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299310 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwrcq\" (UniqueName: \"kubernetes.io/projected/f9529ab3-6789-4dbf-a404-9e255ad0a559-kube-api-access-lwrcq\") pod \"dns-operator-744455d44c-jk4jp\" (UID: \"f9529ab3-6789-4dbf-a404-9e255ad0a559\") " pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299347 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7rgd\" (UniqueName: \"kubernetes.io/projected/297ecdc1-e23a-469b-a1f5-907876ecdfaa-kube-api-access-p7rgd\") pod \"control-plane-machine-set-operator-78cbb6b69f-hcmms\" (UID: \"297ecdc1-e23a-469b-a1f5-907876ecdfaa\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299374 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93712aef-1f09-4361-922f-844e0f5438b6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299406 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299429 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/60e7255c-ae49-4977-94a1-1584892429cb-etcd-client\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299456 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dww6\" (UniqueName: \"kubernetes.io/projected/54eee4e4-f460-4d64-a083-0d4b81be7561-kube-api-access-4dww6\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299661 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-config\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299769 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh79r\" (UniqueName: \"kubernetes.io/projected/d13b21e2-5103-4d54-ae52-1d7344e8168c-kube-api-access-gh79r\") pod \"multus-admission-controller-857f4d67dd-68bqx\" (UID: \"d13b21e2-5103-4d54-ae52-1d7344e8168c\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299798 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/54eee4e4-f460-4d64-a083-0d4b81be7561-images\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.299982 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54eee4e4-f460-4d64-a083-0d4b81be7561-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300732 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a5624b6f-fed8-4b28-bc4d-8143da4a19de-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300767 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-etcd-service-ca\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300791 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kq66\" (UniqueName: \"kubernetes.io/projected/38472de7-e7a3-4a12-8b6d-96af0d58ea1a-kube-api-access-9kq66\") pod \"migrator-59844c95c7-h9txk\" (UID: \"38472de7-e7a3-4a12-8b6d-96af0d58ea1a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300845 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54eee4e4-f460-4d64-a083-0d4b81be7561-proxy-tls\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300884 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd694c7-53b3-4289-956c-6919aa0536af-serving-cert\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300921 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/297ecdc1-e23a-469b-a1f5-907876ecdfaa-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-hcmms\" (UID: \"297ecdc1-e23a-469b-a1f5-907876ecdfaa\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.300947 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvjsk\" (UniqueName: \"kubernetes.io/projected/39725dbb-08de-484d-86c4-0c99c39f0d07-kube-api-access-gvjsk\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301007 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a51c95a-3cda-4cd7-baff-6f802d483286-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301037 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60e7255c-ae49-4977-94a1-1584892429cb-serving-cert\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301059 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5624b6f-fed8-4b28-bc4d-8143da4a19de-config\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301083 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-etcd-ca\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301109 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-config\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301141 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a51c95a-3cda-4cd7-baff-6f802d483286-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301182 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f9529ab3-6789-4dbf-a404-9e255ad0a559-metrics-tls\") pod \"dns-operator-744455d44c-jk4jp\" (UID: \"f9529ab3-6789-4dbf-a404-9e255ad0a559\") " pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301198 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rdx5\" (UniqueName: \"kubernetes.io/projected/93712aef-1f09-4361-922f-844e0f5438b6-kube-api-access-4rdx5\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301217 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39725dbb-08de-484d-86c4-0c99c39f0d07-proxy-tls\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301233 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39725dbb-08de-484d-86c4-0c99c39f0d07-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301263 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5624b6f-fed8-4b28-bc4d-8143da4a19de-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301278 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93712aef-1f09-4361-922f-844e0f5438b6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301293 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4a51c95a-3cda-4cd7-baff-6f802d483286-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 
15:10:47.301307 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d13b21e2-5103-4d54-ae52-1d7344e8168c-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-68bqx\" (UID: \"d13b21e2-5103-4d54-ae52-1d7344e8168c\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301330 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcd694c7-53b3-4289-956c-6919aa0536af-trusted-ca\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301345 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj5h9\" (UniqueName: \"kubernetes.io/projected/60e7255c-ae49-4977-94a1-1584892429cb-kube-api-access-nj5h9\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301359 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bczv8\" (UniqueName: \"kubernetes.io/projected/dcd694c7-53b3-4289-956c-6919aa0536af-kube-api-access-bczv8\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301876 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54eee4e4-f460-4d64-a083-0d4b81be7561-auth-proxy-config\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.301905 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd694c7-53b3-4289-956c-6919aa0536af-config\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.302035 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.302684 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a51c95a-3cda-4cd7-baff-6f802d483286-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.303232 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dcd694c7-53b3-4289-956c-6919aa0536af-trusted-ca\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.303980 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39725dbb-08de-484d-86c4-0c99c39f0d07-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.304574 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd694c7-53b3-4289-956c-6919aa0536af-serving-cert\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.304871 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a51c95a-3cda-4cd7-baff-6f802d483286-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.306178 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f9529ab3-6789-4dbf-a404-9e255ad0a559-metrics-tls\") pod \"dns-operator-744455d44c-jk4jp\" (UID: \"f9529ab3-6789-4dbf-a404-9e255ad0a559\") " pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.322305 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.342895 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.381979 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.382757 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-config\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.402865 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.412459 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/60e7255c-ae49-4977-94a1-1584892429cb-etcd-client\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.423245 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.442760 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.455414 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60e7255c-ae49-4977-94a1-1584892429cb-serving-cert\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.463033 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.472962 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-etcd-ca\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.482353 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.491611 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/60e7255c-ae49-4977-94a1-1584892429cb-etcd-service-ca\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.502840 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.522356 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.542610 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.550699 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/54eee4e4-f460-4d64-a083-0d4b81be7561-images\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.562719 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.582406 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.594466 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54eee4e4-f460-4d64-a083-0d4b81be7561-proxy-tls\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.602731 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.622880 4716 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.636169 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39725dbb-08de-484d-86c4-0c99c39f0d07-proxy-tls\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.643246 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.662028 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.667082 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d13b21e2-5103-4d54-ae52-1d7344e8168c-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-68bqx\" (UID: \"d13b21e2-5103-4d54-ae52-1d7344e8168c\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.683335 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.702106 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.722951 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.742769 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.763083 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.782965 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.793223 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93712aef-1f09-4361-922f-844e0f5438b6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.801675 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.822835 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.842963 4716 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.862987 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.873524 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93712aef-1f09-4361-922f-844e0f5438b6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.890686 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.902608 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.922415 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.943203 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.962368 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.982983 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 09 15:10:47 crc kubenswrapper[4716]: I1209 15:10:47.992593 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5624b6f-fed8-4b28-bc4d-8143da4a19de-config\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.002314 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.022384 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.035655 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5624b6f-fed8-4b28-bc4d-8143da4a19de-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.042828 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.061741 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 09 15:10:48 crc kubenswrapper[4716]: 
I1209 15:10:48.083022 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.092720 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.100688 4716 request.go:700] Waited for 1.012115309s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager-operator/configmaps?fieldSelector=metadata.name%3Dkube-controller-manager-operator-config&limit=500&resourceVersion=0 Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.102707 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.111108 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-config\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.123157 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.142486 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.155508 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/297ecdc1-e23a-469b-a1f5-907876ecdfaa-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-hcmms\" (UID: \"297ecdc1-e23a-469b-a1f5-907876ecdfaa\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.162331 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.182961 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.202515 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.222598 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.261862 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" 
Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.282096 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.301878 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.322399 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.342700 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.362216 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.383268 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.403326 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.423137 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.443099 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.463018 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.483052 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.503359 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.522694 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.543260 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.563607 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.583349 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.601720 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.622188 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.642478 4716 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.662187 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.688467 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.702234 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.723101 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.742330 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.762670 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.781992 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.801560 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.839760 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt54k\" (UniqueName: \"kubernetes.io/projected/4acd966d-4bae-456f-bffd-9ad6533cc66d-kube-api-access-zt54k\") pod \"controller-manager-879f6c89f-bs6mn\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.857569 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-647nf\" (UniqueName: \"kubernetes.io/projected/8c6090d1-1d37-4305-9cbf-3c76c3237777-kube-api-access-647nf\") pod \"console-f9d7485db-ckjw6\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.878836 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7cjh\" (UniqueName: \"kubernetes.io/projected/3924e326-c048-4d29-883f-9b2a07edfe5a-kube-api-access-z7cjh\") pod \"authentication-operator-69f744f599-gjrqw\" (UID: \"3924e326-c048-4d29-883f-9b2a07edfe5a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.896764 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvdzf\" (UniqueName: \"kubernetes.io/projected/326772eb-ce2f-4f9b-bee5-8265a8762f26-kube-api-access-rvdzf\") pod \"apiserver-7bbb656c7d-8svp5\" (UID: \"326772eb-ce2f-4f9b-bee5-8265a8762f26\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.917045 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snswq\" (UniqueName: \"kubernetes.io/projected/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-kube-api-access-snswq\") pod \"oauth-openshift-558db77b4-bj2zv\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.938544 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.954420 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9r42\" (UniqueName: \"kubernetes.io/projected/002d496a-6bac-491c-bf18-2b4a325f78ec-kube-api-access-g9r42\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.957363 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4bm5\" (UniqueName: \"kubernetes.io/projected/277458f8-729e-4250-b4d0-db21713e4e48-kube-api-access-k4bm5\") pod \"machine-api-operator-5694c8668f-w6p7n\" (UID: \"277458f8-729e-4250-b4d0-db21713e4e48\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.960076 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.980702 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hghbz\" (UniqueName: \"kubernetes.io/projected/646b361f-bd5c-4be0-a52c-735688c0f88a-kube-api-access-hghbz\") pod \"openshift-apiserver-operator-796bbdcf4f-9ndtx\" (UID: \"646b361f-bd5c-4be0-a52c-735688c0f88a\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" Dec 09 15:10:48 crc kubenswrapper[4716]: I1209 15:10:48.996674 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vw2dm\" (UniqueName: \"kubernetes.io/projected/d90937ae-2446-41f4-94eb-e928c5d449de-kube-api-access-vw2dm\") pod \"route-controller-manager-6576b87f9c-pfn66\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.020360 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/002d496a-6bac-491c-bf18-2b4a325f78ec-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-dxrvh\" (UID: \"002d496a-6bac-491c-bf18-2b4a325f78ec\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.040259 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbmfc\" (UniqueName: \"kubernetes.io/projected/a85db618-f9aa-44e6-8060-3b660c70940d-kube-api-access-rbmfc\") pod \"machine-approver-56656f9798-87wwr\" (UID: \"a85db618-f9aa-44e6-8060-3b660c70940d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.062568 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5sc4\" (UniqueName: \"kubernetes.io/projected/a16e5eaf-14b0-47eb-8bc9-dfedd024c719-kube-api-access-h5sc4\") pod \"apiserver-76f77b778f-z4jgs\" (UID: \"a16e5eaf-14b0-47eb-8bc9-dfedd024c719\") " pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 
15:10:49.066217 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.084669 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.090109 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pps6\" (UniqueName: \"kubernetes.io/projected/31afed1d-4e1c-491e-b54b-a5e7e24077f1-kube-api-access-5pps6\") pod \"downloads-7954f5f757-8qjxz\" (UID: \"31afed1d-4e1c-491e-b54b-a5e7e24077f1\") " pod="openshift-console/downloads-7954f5f757-8qjxz"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.093912 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.100804 4716 request.go:700] Waited for 1.87001765s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.103292 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.105206 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.122944 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.134016 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.146804 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ckjw6"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.147349 4716 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.150422 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.162052 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.181660 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.186270 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.200768 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.204298 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.211653 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.227522 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.230464 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.242706 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.247265 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-gjrqw"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.254172 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-8qjxz"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.279409 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.306850 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7rgd\" (UniqueName: \"kubernetes.io/projected/297ecdc1-e23a-469b-a1f5-907876ecdfaa-kube-api-access-p7rgd\") pod \"control-plane-machine-set-operator-78cbb6b69f-hcmms\" (UID: \"297ecdc1-e23a-469b-a1f5-907876ecdfaa\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.332758 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwrcq\" (UniqueName: \"kubernetes.io/projected/f9529ab3-6789-4dbf-a404-9e255ad0a559-kube-api-access-lwrcq\") pod \"dns-operator-744455d44c-jk4jp\" (UID: \"f9529ab3-6789-4dbf-a404-9e255ad0a559\") " pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.334706 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bs6mn"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.340968 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dww6\" (UniqueName: \"kubernetes.io/projected/54eee4e4-f460-4d64-a083-0d4b81be7561-kube-api-access-4dww6\") pod \"machine-config-operator-74547568cd-qj9vv\" (UID: \"54eee4e4-f460-4d64-a083-0d4b81be7561\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.365708 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh79r\" (UniqueName: \"kubernetes.io/projected/d13b21e2-5103-4d54-ae52-1d7344e8168c-kube-api-access-gh79r\") pod \"multus-admission-controller-857f4d67dd-68bqx\" (UID: \"d13b21e2-5103-4d54-ae52-1d7344e8168c\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.386414 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a5624b6f-fed8-4b28-bc4d-8143da4a19de-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-z4542\" (UID: \"a5624b6f-fed8-4b28-bc4d-8143da4a19de\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.388692 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"]
Dec 09 15:10:49 crc kubenswrapper[4716]: W1209 15:10:49.401173 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4acd966d_4bae_456f_bffd_9ad6533cc66d.slice/crio-3945069fb90ed8aeb1f7386a28d8bbf335cca6cdaf0d28d8f1a3a8715a038581 WatchSource:0}: Error finding container 3945069fb90ed8aeb1f7386a28d8bbf335cca6cdaf0d28d8f1a3a8715a038581: Status 404 returned error can't find the container with id 3945069fb90ed8aeb1f7386a28d8bbf335cca6cdaf0d28d8f1a3a8715a038581
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.405650 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kq66\" (UniqueName: \"kubernetes.io/projected/38472de7-e7a3-4a12-8b6d-96af0d58ea1a-kube-api-access-9kq66\") pod \"migrator-59844c95c7-h9txk\" (UID: \"38472de7-e7a3-4a12-8b6d-96af0d58ea1a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.424409 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6e905b55-fbc3-4cd8-b1cf-5092c2421dc0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cjqcg\" (UID: \"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.443253 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.443732 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-z4jgs"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.447477 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvjsk\" (UniqueName: \"kubernetes.io/projected/39725dbb-08de-484d-86c4-0c99c39f0d07-kube-api-access-gvjsk\") pod \"machine-config-controller-84d6567774-ktjk2\" (UID: \"39725dbb-08de-484d-86c4-0c99c39f0d07\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.457770 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4a51c95a-3cda-4cd7-baff-6f802d483286-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-2xfp8\" (UID: \"4a51c95a-3cda-4cd7-baff-6f802d483286\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.458109 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.471291 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bj2zv"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.478326 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rdx5\" (UniqueName: \"kubernetes.io/projected/93712aef-1f09-4361-922f-844e0f5438b6-kube-api-access-4rdx5\") pod \"kube-storage-version-migrator-operator-b67b599dd-qwkjm\" (UID: \"93712aef-1f09-4361-922f-844e0f5438b6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.479408 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.490516 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.498278 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj5h9\" (UniqueName: \"kubernetes.io/projected/60e7255c-ae49-4977-94a1-1584892429cb-kube-api-access-nj5h9\") pod \"etcd-operator-b45778765-b25nc\" (UID: \"60e7255c-ae49-4977-94a1-1584892429cb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.499266 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.509395 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.514656 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.516093 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-w6p7n"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.517971 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bczv8\" (UniqueName: \"kubernetes.io/projected/dcd694c7-53b3-4289-956c-6919aa0536af-kube-api-access-bczv8\") pod \"console-operator-58897d9998-vd9wq\" (UID: \"dcd694c7-53b3-4289-956c-6919aa0536af\") " pod="openshift-console-operator/console-operator-58897d9998-vd9wq"
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.609553 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.744999 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.762363 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.784165 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-8qjxz"]
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.872458 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" event={"ID":"3924e326-c048-4d29-883f-9b2a07edfe5a","Type":"ContainerStarted","Data":"69e502f98e00fc364cbe97311cded474663cdba296ff5ab3547261de92599d74"}
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.873423 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" event={"ID":"a85db618-f9aa-44e6-8060-3b660c70940d","Type":"ContainerStarted","Data":"95b25e7fdbbe03973a920e0350b3873d1f81ab55ce5224383fe98d36594ebbea"}
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.874293 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" event={"ID":"4acd966d-4bae-456f-bffd-9ad6533cc66d","Type":"ContainerStarted","Data":"3945069fb90ed8aeb1f7386a28d8bbf335cca6cdaf0d28d8f1a3a8715a038581"}
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.875132 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ckjw6" event={"ID":"8c6090d1-1d37-4305-9cbf-3c76c3237777","Type":"ContainerStarted","Data":"1b19a061f0efd05c9f456caa35a1996e225c681a63eb4b787d85536290d36453"}
Dec 09 15:10:49 crc kubenswrapper[4716]: I1209 15:10:49.887004 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" event={"ID":"d90937ae-2446-41f4-94eb-e928c5d449de","Type":"ContainerStarted","Data":"c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790"}
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.190313 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.190439 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.190677 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.190756 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.196396 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-vd9wq"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.200835 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-trusted-ca\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.201074 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-registry-tls\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.201138 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a9146318-442c-453e-977e-802cdaa5532a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.201232 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.204521 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:50.704502452 +0000 UTC m=+137.859246440 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.204917 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-bound-sa-token\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.204955 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr87p\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-kube-api-access-xr87p\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.205349 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-registry-certificates\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.206024 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a9146318-442c-453e-977e-802cdaa5532a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307094 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.307267 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:50.807226759 +0000 UTC m=+137.961970747 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307342 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/62b9c370-77f9-489c-b34f-6ea6a45e4245-trusted-ca\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307402 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-bound-sa-token\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307428 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr87p\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-kube-api-access-xr87p\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307457 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-registry-certificates\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307486 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4ff467d8-21a2-4f5f-a9eb-37800bfc01d8-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-hswzp\" (UID: \"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307517 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a9146318-442c-453e-977e-802cdaa5532a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307574 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhgjc\" (UniqueName: \"kubernetes.io/projected/62b9c370-77f9-489c-b34f-6ea6a45e4245-kube-api-access-zhgjc\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307600 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-trusted-ca\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307661 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307693 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9btn\" (UniqueName: \"kubernetes.io/projected/e432b91b-278e-4a2f-81cc-4b7983a789da-kube-api-access-k9btn\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307720 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307752 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/62b9c370-77f9-489c-b34f-6ea6a45e4245-bound-sa-token\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307789 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6995d782-f9c9-4e5a-95a6-07ac426ad59c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307824 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-metrics-certs\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307856 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e432b91b-278e-4a2f-81cc-4b7983a789da-service-ca-bundle\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307892 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-serving-cert\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307966 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-registry-tls\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.307992 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62b9c370-77f9-489c-b34f-6ea6a45e4245-metrics-tls\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308014 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a9146318-442c-453e-977e-802cdaa5532a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308055 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-default-certificate\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308078 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45x5s\" (UniqueName: \"kubernetes.io/projected/6995d782-f9c9-4e5a-95a6-07ac426ad59c-kube-api-access-45x5s\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308121 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-stats-auth\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308142 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6995d782-f9c9-4e5a-95a6-07ac426ad59c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308166 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwh5h\" (UniqueName: \"kubernetes.io/projected/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-kube-api-access-rwh5h\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.308194 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kw5t\" (UniqueName: \"kubernetes.io/projected/4ff467d8-21a2-4f5f-a9eb-37800bfc01d8-kube-api-access-4kw5t\") pod \"cluster-samples-operator-665b6dd947-hswzp\" (UID: \"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.309833 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-registry-certificates\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.310195 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:50.810179172 +0000 UTC m=+137.964923160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.310418 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-trusted-ca\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.311323 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a9146318-442c-453e-977e-802cdaa5532a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.317345 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-registry-tls\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.320209 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a9146318-442c-453e-977e-802cdaa5532a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.359985 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr87p\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-kube-api-access-xr87p\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.378918 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-bound-sa-token\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.410126 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.410559 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:50.910375848 +0000 UTC m=+138.065119836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.410586 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d22l\" (UniqueName: \"kubernetes.io/projected/628cc532-1e6f-4a8e-a535-60d65177f38b-kube-api-access-5d22l\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.410634 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-serving-cert\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.415537 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jq5p\" (UniqueName: \"kubernetes.io/projected/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-kube-api-access-5jq5p\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.415588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e432b91b-278e-4a2f-81cc-4b7983a789da-service-ca-bundle\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.415731 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-serving-cert\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.415808 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e70d414d-6ada-448f-8047-73667b53d329-signing-cabundle\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.415875 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvh6z\" (UniqueName: \"kubernetes.io/projected/103c6227-5dc6-4322-ae14-201ad9e08295-kube-api-access-tvh6z\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.416063 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lm99\" (UniqueName: \"kubernetes.io/projected/a18804a5-1680-4833-b3f6-1f02584286e1-kube-api-access-7lm99\") pod \"ingress-canary-xcs2n\" (UID: \"a18804a5-1680-4833-b3f6-1f02584286e1\") " pod="openshift-ingress-canary/ingress-canary-xcs2n"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.416098 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a5f3bc47-b766-47d3-b801-01f31edfe5ab-apiservice-cert\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.416125 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/07bbf222-b692-4ffb-87e7-32cd55c110b9-srv-cert\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417399 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62b9c370-77f9-489c-b34f-6ea6a45e4245-metrics-tls\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417421 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-mountpoint-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417523 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a18804a5-1680-4833-b3f6-1f02584286e1-cert\") pod \"ingress-canary-xcs2n\" (UID: \"a18804a5-1680-4833-b3f6-1f02584286e1\") " pod="openshift-ingress-canary/ingress-canary-xcs2n"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417540 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e432b91b-278e-4a2f-81cc-4b7983a789da-service-ca-bundle\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417613 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/28527c1d-72f3-49b3-9a82-26aaae3ece08-certs\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417681 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-csi-data-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417723 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e0c79e5-04c3-4242-9472-0cb67c34b499-secret-volume\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417749 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/07bbf222-b692-4ffb-87e7-32cd55c110b9-profile-collector-cert\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417809 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-default-certificate\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417835 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45x5s\" (UniqueName: \"kubernetes.io/projected/6995d782-f9c9-4e5a-95a6-07ac426ad59c-kube-api-access-45x5s\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417876 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/628cc532-1e6f-4a8e-a535-60d65177f38b-srv-cert\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417901 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-config\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417939 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-stats-auth\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.417967 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6995d782-f9c9-4e5a-95a6-07ac426ad59c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418013 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e70d414d-6ada-448f-8047-73667b53d329-signing-key\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418069 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwh5h\" (UniqueName: \"kubernetes.io/projected/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-kube-api-access-rwh5h\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418119 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kw5t\" (UniqueName: \"kubernetes.io/projected/4ff467d8-21a2-4f5f-a9eb-37800bfc01d8-kube-api-access-4kw5t\") pod \"cluster-samples-operator-665b6dd947-hswzp\" (UID: \"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418144 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418201 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/62b9c370-77f9-489c-b34f-6ea6a45e4245-trusted-ca\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418244 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrczx\" (UniqueName: \"kubernetes.io/projected/8e0c79e5-04c3-4242-9472-0cb67c34b499-kube-api-access-nrczx\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418275 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4ff467d8-21a2-4f5f-a9eb-37800bfc01d8-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-hswzp\" (UID: \"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418335 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-registration-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418399 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfrbn\" (UniqueName: \"kubernetes.io/projected/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-kube-api-access-sfrbn\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418438 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7aaeb25-0a0e-4487-be02-0d847555a3ea-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qgzhh\" (UID: \"b7aaeb25-0a0e-4487-be02-0d847555a3ea\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418481 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djnbr\" (UniqueName: \"kubernetes.io/projected/b7aaeb25-0a0e-4487-be02-0d847555a3ea-kube-api-access-djnbr\") pod \"package-server-manager-789f6589d5-qgzhh\" (UID: \"b7aaeb25-0a0e-4487-be02-0d847555a3ea\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418507 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhgjc\" (UniqueName: \"kubernetes.io/projected/62b9c370-77f9-489c-b34f-6ea6a45e4245-kube-api-access-zhgjc\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418543 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103c6227-5dc6-4322-ae14-201ad9e08295-config-volume\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418594 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418637 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-627tr\" (UniqueName: \"kubernetes.io/projected/28527c1d-72f3-49b3-9a82-26aaae3ece08-kube-api-access-627tr\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418662 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6x44\" (UniqueName: \"kubernetes.io/projected/9bc42122-0f65-4008-b335-5539ebd4ad62-kube-api-access-g6x44\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418690 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9btn\" (UniqueName: \"kubernetes.io/projected/e432b91b-278e-4a2f-81cc-4b7983a789da-kube-api-access-k9btn\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418732 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418757 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a5f3bc47-b766-47d3-b801-01f31edfe5ab-webhook-cert\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418799 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvmqg\" (UniqueName: \"kubernetes.io/projected/e70d414d-6ada-448f-8047-73667b53d329-kube-api-access-tvmqg\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418855 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-plugins-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418880 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/103c6227-5dc6-4322-ae14-201ad9e08295-metrics-tls\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418905 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e0c79e5-04c3-4242-9472-0cb67c34b499-config-volume\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418964 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/62b9c370-77f9-489c-b34f-6ea6a45e4245-bound-sa-token\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.418989 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-socket-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419013 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/628cc532-1e6f-4a8e-a535-60d65177f38b-profile-collector-cert\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419071 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419725 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvtqx\" (UniqueName: \"kubernetes.io/projected/07bbf222-b692-4ffb-87e7-32cd55c110b9-kube-api-access-zvtqx\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419795 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6995d782-f9c9-4e5a-95a6-07ac426ad59c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419820 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5bx2\" (UniqueName: \"kubernetes.io/projected/a5f3bc47-b766-47d3-b801-01f31edfe5ab-kube-api-access-b5bx2\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419893 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/28527c1d-72f3-49b3-9a82-26aaae3ece08-node-bootstrap-token\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419928 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a5f3bc47-b766-47d3-b801-01f31edfe5ab-tmpfs\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.419551 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-available-featuregates\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.419955 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:50.919937806 +0000 UTC m=+138.074681864 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.420014 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-metrics-certs\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.421319 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6995d782-f9c9-4e5a-95a6-07ac426ad59c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.423227 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/62b9c370-77f9-489c-b34f-6ea6a45e4245-trusted-ca\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.425319 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6995d782-f9c9-4e5a-95a6-07ac426ad59c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.427011 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-metrics-certs\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.427122 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/4ff467d8-21a2-4f5f-a9eb-37800bfc01d8-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-hswzp\" (UID: \"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.427779 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-serving-cert\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.429115 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62b9c370-77f9-489c-b34f-6ea6a45e4245-metrics-tls\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.430964 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-stats-auth\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.432106 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/e432b91b-278e-4a2f-81cc-4b7983a789da-default-certificate\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.467150 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwh5h\" (UniqueName: \"kubernetes.io/projected/f08b8fc4-fca7-4a7e-b42d-97f41d3cd136-kube-api-access-rwh5h\") pod \"openshift-config-operator-7777fb866f-nt28v\" (UID: \"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.477290 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.497644 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45x5s\" (UniqueName: \"kubernetes.io/projected/6995d782-f9c9-4e5a-95a6-07ac426ad59c-kube-api-access-45x5s\") pod \"openshift-controller-manager-operator-756b6f6bc6-c74ns\" (UID: \"6995d782-f9c9-4e5a-95a6-07ac426ad59c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.501374 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhgjc\" (UniqueName: \"kubernetes.io/projected/62b9c370-77f9-489c-b34f-6ea6a45e4245-kube-api-access-zhgjc\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.520827 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521022 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103c6227-5dc6-4322-ae14-201ad9e08295-config-volume\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521066 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-627tr\" (UniqueName: \"kubernetes.io/projected/28527c1d-72f3-49b3-9a82-26aaae3ece08-kube-api-access-627tr\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521088 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6x44\" (UniqueName: \"kubernetes.io/projected/9bc42122-0f65-4008-b335-5539ebd4ad62-kube-api-access-g6x44\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521112 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a5f3bc47-b766-47d3-b801-01f31edfe5ab-webhook-cert\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"
Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521133 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvmqg\" (UniqueName: \"kubernetes.io/projected/e70d414d-6ada-448f-8047-73667b53d329-kube-api-access-tvmqg\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s"
Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.521157 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.021131511 +0000 UTC m=+138.175875499 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521294 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-plugins-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521339 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-socket-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521357 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/103c6227-5dc6-4322-ae14-201ad9e08295-metrics-tls\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521377 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e0c79e5-04c3-4242-9472-0cb67c34b499-config-volume\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521396 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/628cc532-1e6f-4a8e-a535-60d65177f38b-profile-collector-cert\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521416 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521433 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvtqx\" (UniqueName: \"kubernetes.io/projected/07bbf222-b692-4ffb-87e7-32cd55c110b9-kube-api-access-zvtqx\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521456 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5bx2\" (UniqueName: 
\"kubernetes.io/projected/a5f3bc47-b766-47d3-b801-01f31edfe5ab-kube-api-access-b5bx2\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521472 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/28527c1d-72f3-49b3-9a82-26aaae3ece08-node-bootstrap-token\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521500 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a5f3bc47-b766-47d3-b801-01f31edfe5ab-tmpfs\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521517 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d22l\" (UniqueName: \"kubernetes.io/projected/628cc532-1e6f-4a8e-a535-60d65177f38b-kube-api-access-5d22l\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521547 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-serving-cert\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521569 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jq5p\" (UniqueName: \"kubernetes.io/projected/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-kube-api-access-5jq5p\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521602 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvh6z\" (UniqueName: \"kubernetes.io/projected/103c6227-5dc6-4322-ae14-201ad9e08295-kube-api-access-tvh6z\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521630 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e70d414d-6ada-448f-8047-73667b53d329-signing-cabundle\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521661 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lm99\" (UniqueName: \"kubernetes.io/projected/a18804a5-1680-4833-b3f6-1f02584286e1-kube-api-access-7lm99\") pod \"ingress-canary-xcs2n\" (UID: \"a18804a5-1680-4833-b3f6-1f02584286e1\") " pod="openshift-ingress-canary/ingress-canary-xcs2n" Dec 09 
15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521682 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a5f3bc47-b766-47d3-b801-01f31edfe5ab-apiservice-cert\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521702 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/07bbf222-b692-4ffb-87e7-32cd55c110b9-srv-cert\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521729 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-mountpoint-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521748 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a18804a5-1680-4833-b3f6-1f02584286e1-cert\") pod \"ingress-canary-xcs2n\" (UID: \"a18804a5-1680-4833-b3f6-1f02584286e1\") " pod="openshift-ingress-canary/ingress-canary-xcs2n" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521772 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/28527c1d-72f3-49b3-9a82-26aaae3ece08-certs\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521788 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-csi-data-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521804 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e0c79e5-04c3-4242-9472-0cb67c34b499-secret-volume\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/07bbf222-b692-4ffb-87e7-32cd55c110b9-profile-collector-cert\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521839 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/628cc532-1e6f-4a8e-a535-60d65177f38b-srv-cert\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521854 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-config\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521877 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e70d414d-6ada-448f-8047-73667b53d329-signing-key\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521905 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521929 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrczx\" (UniqueName: \"kubernetes.io/projected/8e0c79e5-04c3-4242-9472-0cb67c34b499-kube-api-access-nrczx\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521952 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-registration-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521955 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/103c6227-5dc6-4322-ae14-201ad9e08295-config-volume\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521970 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfrbn\" (UniqueName: \"kubernetes.io/projected/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-kube-api-access-sfrbn\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.521993 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7aaeb25-0a0e-4487-be02-0d847555a3ea-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qgzhh\" (UID: \"b7aaeb25-0a0e-4487-be02-0d847555a3ea\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.522013 4716 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-djnbr\" (UniqueName: \"kubernetes.io/projected/b7aaeb25-0a0e-4487-be02-0d847555a3ea-kube-api-access-djnbr\") pod \"package-server-manager-789f6589d5-qgzhh\" (UID: \"b7aaeb25-0a0e-4487-be02-0d847555a3ea\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.522593 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-plugins-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.522690 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-socket-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.535797 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/07bbf222-b692-4ffb-87e7-32cd55c110b9-srv-cert\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.536049 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/62b9c370-77f9-489c-b34f-6ea6a45e4245-bound-sa-token\") pod \"ingress-operator-5b745b69d9-447g6\" (UID: \"62b9c370-77f9-489c-b34f-6ea6a45e4245\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.536769 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e0c79e5-04c3-4242-9472-0cb67c34b499-config-volume\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.539398 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-config\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.541268 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-mountpoint-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.546293 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a18804a5-1680-4833-b3f6-1f02584286e1-cert\") pod \"ingress-canary-xcs2n\" (UID: \"a18804a5-1680-4833-b3f6-1f02584286e1\") " pod="openshift-ingress-canary/ingress-canary-xcs2n" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.549573 4716 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.552000 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e70d414d-6ada-448f-8047-73667b53d329-signing-cabundle\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.556344 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/628cc532-1e6f-4a8e-a535-60d65177f38b-profile-collector-cert\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.556878 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/628cc532-1e6f-4a8e-a535-60d65177f38b-srv-cert\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.559007 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-registration-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.571337 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/9bc42122-0f65-4008-b335-5539ebd4ad62-csi-data-dir\") pod \"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.573900 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e70d414d-6ada-448f-8047-73667b53d329-signing-key\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.574274 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9btn\" (UniqueName: \"kubernetes.io/projected/e432b91b-278e-4a2f-81cc-4b7983a789da-kube-api-access-k9btn\") pod \"router-default-5444994796-w9bdm\" (UID: \"e432b91b-278e-4a2f-81cc-4b7983a789da\") " pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.574313 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a5f3bc47-b766-47d3-b801-01f31edfe5ab-tmpfs\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.577078 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/07bbf222-b692-4ffb-87e7-32cd55c110b9-profile-collector-cert\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.581742 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/28527c1d-72f3-49b3-9a82-26aaae3ece08-node-bootstrap-token\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.587466 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e0c79e5-04c3-4242-9472-0cb67c34b499-secret-volume\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.588116 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7aaeb25-0a0e-4487-be02-0d847555a3ea-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qgzhh\" (UID: \"b7aaeb25-0a0e-4487-be02-0d847555a3ea\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.602850 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kw5t\" (UniqueName: \"kubernetes.io/projected/4ff467d8-21a2-4f5f-a9eb-37800bfc01d8-kube-api-access-4kw5t\") pod \"cluster-samples-operator-665b6dd947-hswzp\" (UID: \"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.606164 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.619656 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvmqg\" (UniqueName: \"kubernetes.io/projected/e70d414d-6ada-448f-8047-73667b53d329-kube-api-access-tvmqg\") pod \"service-ca-9c57cc56f-jj67s\" (UID: \"e70d414d-6ada-448f-8047-73667b53d329\") " pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.622734 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.623323 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.123306853 +0000 UTC m=+138.278050841 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.636431 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/103c6227-5dc6-4322-ae14-201ad9e08295-metrics-tls\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.637707 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a5f3bc47-b766-47d3-b801-01f31edfe5ab-apiservice-cert\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.638260 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.638597 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a5f3bc47-b766-47d3-b801-01f31edfe5ab-webhook-cert\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.638606 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-serving-cert\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.638801 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-627tr\" (UniqueName: \"kubernetes.io/projected/28527c1d-72f3-49b3-9a82-26aaae3ece08-kube-api-access-627tr\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.639271 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/28527c1d-72f3-49b3-9a82-26aaae3ece08-certs\") pod \"machine-config-server-gjq4j\" (UID: \"28527c1d-72f3-49b3-9a82-26aaae3ece08\") " pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.642732 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6x44\" (UniqueName: \"kubernetes.io/projected/9bc42122-0f65-4008-b335-5539ebd4ad62-kube-api-access-g6x44\") pod 
\"csi-hostpathplugin-nlkfk\" (UID: \"9bc42122-0f65-4008-b335-5539ebd4ad62\") " pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.654570 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.689053 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.710908 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djnbr\" (UniqueName: \"kubernetes.io/projected/b7aaeb25-0a0e-4487-be02-0d847555a3ea-kube-api-access-djnbr\") pod \"package-server-manager-789f6589d5-qgzhh\" (UID: \"b7aaeb25-0a0e-4487-be02-0d847555a3ea\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.712667 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrczx\" (UniqueName: \"kubernetes.io/projected/8e0c79e5-04c3-4242-9472-0cb67c34b499-kube-api-access-nrczx\") pod \"collect-profiles-29421540-z6t4d\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.724534 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.725205 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.225179686 +0000 UTC m=+138.379923674 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.729195 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvh6z\" (UniqueName: \"kubernetes.io/projected/103c6227-5dc6-4322-ae14-201ad9e08295-kube-api-access-tvh6z\") pod \"dns-default-pvl7k\" (UID: \"103c6227-5dc6-4322-ae14-201ad9e08295\") " pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.736524 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.736966 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jq5p\" (UniqueName: \"kubernetes.io/projected/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-kube-api-access-5jq5p\") pod \"marketplace-operator-79b997595-2rcsb\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.741283 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.749904 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.754183 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lm99\" (UniqueName: \"kubernetes.io/projected/a18804a5-1680-4833-b3f6-1f02584286e1-kube-api-access-7lm99\") pod \"ingress-canary-xcs2n\" (UID: \"a18804a5-1680-4833-b3f6-1f02584286e1\") " pod="openshift-ingress-canary/ingress-canary-xcs2n" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.769709 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfrbn\" (UniqueName: \"kubernetes.io/projected/c4635fa5-b3e0-458f-80ef-4f44cc8097fa-kube-api-access-sfrbn\") pod \"service-ca-operator-777779d784-k9b7p\" (UID: \"c4635fa5-b3e0-458f-80ef-4f44cc8097fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.770022 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.780038 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm"] Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.791807 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.797066 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvtqx\" (UniqueName: \"kubernetes.io/projected/07bbf222-b692-4ffb-87e7-32cd55c110b9-kube-api-access-zvtqx\") pod \"olm-operator-6b444d44fb-62zfg\" (UID: \"07bbf222-b692-4ffb-87e7-32cd55c110b9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.805498 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.813018 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.813324 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d22l\" (UniqueName: \"kubernetes.io/projected/628cc532-1e6f-4a8e-a535-60d65177f38b-kube-api-access-5d22l\") pod \"catalog-operator-68c6474976-j9v7v\" (UID: \"628cc532-1e6f-4a8e-a535-60d65177f38b\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.825276 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5bx2\" (UniqueName: \"kubernetes.io/projected/a5f3bc47-b766-47d3-b801-01f31edfe5ab-kube-api-access-b5bx2\") pod \"packageserver-d55dfcdfc-x572x\" (UID: \"a5f3bc47-b766-47d3-b801-01f31edfe5ab\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.826027 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.826633 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.326589537 +0000 UTC m=+138.481333705 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.848414 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.856514 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-xcs2n" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.865159 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gjq4j" Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.927042 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:50 crc kubenswrapper[4716]: E1209 15:10:50.927739 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-09 15:10:51.427717649 +0000 UTC m=+138.582461637 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.924615 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" event={"ID":"a85db618-f9aa-44e6-8060-3b660c70940d","Type":"ContainerStarted","Data":"14cd955813768bd690e6498ecdb37292ef5e85aaeadda9aa301e79706613e168"} Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.937857 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" event={"ID":"3924e326-c048-4d29-883f-9b2a07edfe5a","Type":"ContainerStarted","Data":"b82c12c0c7b00e050976967848bd81c28a06d53f33eed2c0d086e658886044d2"} Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.953948 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" event={"ID":"326772eb-ce2f-4f9b-bee5-8265a8762f26","Type":"ContainerStarted","Data":"67bf91572a16b11c54205f0ce3870f5afb652318c0f610707daa6489052660ec"} Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.974077 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8qjxz" event={"ID":"31afed1d-4e1c-491e-b54b-a5e7e24077f1","Type":"ContainerStarted","Data":"f7f0347710f9756435484f8bcae01839b440b3c1a3ceacdb72b4ebae544d0004"} Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.979715 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" event={"ID":"277458f8-729e-4250-b4d0-db21713e4e48","Type":"ContainerStarted","Data":"162ed62de6574c5d4d13ded12d41402d54680ef024dbee89b130fc543b50a7fc"} Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.982992 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" event={"ID":"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4","Type":"ContainerStarted","Data":"0c25f27379bb242954552c4b3178d9bfec53e85ba511c7880f315c33900982f1"} Dec 09 15:10:50 crc kubenswrapper[4716]: I1209 15:10:50.993907 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" event={"ID":"d90937ae-2446-41f4-94eb-e928c5d449de","Type":"ContainerStarted","Data":"537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.019336 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-w9bdm" event={"ID":"e432b91b-278e-4a2f-81cc-4b7983a789da","Type":"ContainerStarted","Data":"7226a5939a3a3df58273046d685385dac07a1e53267833589839cae4340fd189"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.025024 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" 
event={"ID":"002d496a-6bac-491c-bf18-2b4a325f78ec","Type":"ContainerStarted","Data":"149e7173c71dc045b78a6935cb6e48f8f146569ecd31f62cacca48d39afdfdaa"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.029874 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.030219 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.53020373 +0000 UTC m=+138.684947718 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.035442 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" event={"ID":"4acd966d-4bae-456f-bffd-9ad6533cc66d","Type":"ContainerStarted","Data":"6071d60b07474389c76e522b45fbce6a4ae60444689d677ed85606ec7aeb2c9b"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.036533 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.038266 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" event={"ID":"646b361f-bd5c-4be0-a52c-735688c0f88a","Type":"ContainerStarted","Data":"0f7dd4309104dd4c861f4b19378632e3e0e7041a34c1cd2fd976ba357783b440"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.040397 4716 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-bs6mn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.040460 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" podUID="4acd966d-4bae-456f-bffd-9ad6533cc66d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.044372 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv"] Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.049947 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ckjw6" 
event={"ID":"8c6090d1-1d37-4305-9cbf-3c76c3237777","Type":"ContainerStarted","Data":"9c52210cf493d1dc4e76d0992dddb115f687b098b448d6a7635c3bc00b8c9d69"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.058166 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.069277 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.080088 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.081429 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2"] Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.083653 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk"] Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.086184 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" event={"ID":"a16e5eaf-14b0-47eb-8bc9-dfedd024c719","Type":"ContainerStarted","Data":"2c300a2e81d6ae7f956460d43e4098ac03bb86cd6a08d9340ba6232b3f3010d9"} Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.106965 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8"] Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.131329 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.133227 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.633181855 +0000 UTC m=+138.787926013 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.240539 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.241571 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.74155033 +0000 UTC m=+138.896294328 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.341544 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.341864 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.841836659 +0000 UTC m=+138.996580647 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.379270 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b25nc"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.417957 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-vd9wq"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.448043 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.448584 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:51.948563628 +0000 UTC m=+139.103307616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.466407 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-68bqx"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.520258 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.524254 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.560940 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.561359 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.061331687 +0000 UTC m=+139.216075695 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.565909 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.598883 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.621531 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jk4jp"]
Dec 09 15:10:51 crc kubenswrapper[4716]: W1209 15:10:51.621823 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5624b6f_fed8_4b28_bc4d_8143da4a19de.slice/crio-31d9474521e3e0ac1f4c8e4fe0c8e642a1968284dfa38868dd3804744832a7d4 WatchSource:0}: Error finding container 31d9474521e3e0ac1f4c8e4fe0c8e642a1968284dfa38868dd3804744832a7d4: Status 404 returned error can't find the container with id 31d9474521e3e0ac1f4c8e4fe0c8e642a1968284dfa38868dd3804744832a7d4
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.663100 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.663735 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.163706804 +0000 UTC m=+139.318450802 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.666221 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" podStartSLOduration=115.666197224 podStartE2EDuration="1m55.666197224s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:51.651387509 +0000 UTC m=+138.806131497" watchObservedRunningTime="2025-12-09 15:10:51.666197224 +0000 UTC m=+138.820941222"
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.695299 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh"]
Dec 09 15:10:51 crc kubenswrapper[4716]: W1209 15:10:51.702393 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28527c1d_72f3_49b3_9a82_26aaae3ece08.slice/crio-d1626d7d0a302d39d2fab8bd0f36245b8ef8532ed05103996b090a2dd8c3d7af WatchSource:0}: Error finding container d1626d7d0a302d39d2fab8bd0f36245b8ef8532ed05103996b090a2dd8c3d7af: Status 404 returned error can't find the container with id d1626d7d0a302d39d2fab8bd0f36245b8ef8532ed05103996b090a2dd8c3d7af
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.703555 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns"]
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.703970 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-gjrqw" podStartSLOduration=116.703940862 podStartE2EDuration="1m56.703940862s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:51.68355314 +0000 UTC m=+138.838297138" watchObservedRunningTime="2025-12-09 15:10:51.703940862 +0000 UTC m=+138.858684850"
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.718336 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-ckjw6" podStartSLOduration=115.718310684 podStartE2EDuration="1m55.718310684s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:51.710932307 +0000 UTC m=+138.865676295" watchObservedRunningTime="2025-12-09 15:10:51.718310684 +0000 UTC m=+138.873054672"
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.769300 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.770324 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.27028508 +0000 UTC m=+139.425029068 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.871563 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.871954 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.371941397 +0000 UTC m=+139.526685385 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:51 crc kubenswrapper[4716]: I1209 15:10:51.974225 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:51 crc kubenswrapper[4716]: E1209 15:10:51.974550 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.474530711 +0000 UTC m=+139.629274699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.075820 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.076253 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.57623255 +0000 UTC m=+139.730976618 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.146003 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.177025 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.177415 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.677390283 +0000 UTC m=+139.832134271 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.229139 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" event={"ID":"d13b21e2-5103-4d54-ae52-1d7344e8168c","Type":"ContainerStarted","Data":"006bd7bdf55882ccac5746b7429c6cf67a5756615aec36f25259dff6cdaa5809"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.231155 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-nlkfk"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.251810 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" event={"ID":"646b361f-bd5c-4be0-a52c-735688c0f88a","Type":"ContainerStarted","Data":"eb780a3c370579290c101bd7e71c408491f916a66c0df50a93511286170adb24"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.267972 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-447g6"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.279709 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.282201 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" event={"ID":"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0","Type":"ContainerStarted","Data":"df4a1a1a0c9e3753baf0432965973b364342e002da3439fa729a61503d8f1824"}
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.282387 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.782366514 +0000 UTC m=+139.937110692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.309303 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9ndtx" podStartSLOduration=117.309285118 podStartE2EDuration="1m57.309285118s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:52.304771981 +0000 UTC m=+139.459515969" watchObservedRunningTime="2025-12-09 15:10:52.309285118 +0000 UTC m=+139.464029116"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.304603 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" event={"ID":"a5624b6f-fed8-4b28-bc4d-8143da4a19de","Type":"ContainerStarted","Data":"31d9474521e3e0ac1f4c8e4fe0c8e642a1968284dfa38868dd3804744832a7d4"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.334069 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" event={"ID":"4a51c95a-3cda-4cd7-baff-6f802d483286","Type":"ContainerStarted","Data":"8f49eb31df63d9d24a3c5340027a1970586b7c51d2e0405a5eb69319b285db68"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.340862 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" event={"ID":"a85db618-f9aa-44e6-8060-3b660c70940d","Type":"ContainerStarted","Data":"9114666b04bb13e67a408e39b5e150680d86556a76167604b38a2ba928c62319"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.345026 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" event={"ID":"b7aaeb25-0a0e-4487-be02-0d847555a3ea","Type":"ContainerStarted","Data":"0bcccb194b6fadc06ee9a46c9538b21707b64a83bd4d51d48f498809b4e8392a"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.380598 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.382210 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.882151339 +0000 UTC m=+140.036895327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.386179 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.389504 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.889475174 +0000 UTC m=+140.044219162 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.405308 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" event={"ID":"93712aef-1f09-4361-922f-844e0f5438b6","Type":"ContainerStarted","Data":"e4a97dccae61bab56340b0f8397c59d1815bc8935b2018837eab4d32785f04c8"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.406410 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-87wwr" podStartSLOduration=117.406380487 podStartE2EDuration="1m57.406380487s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:52.372894619 +0000 UTC m=+139.527638607" watchObservedRunningTime="2025-12-09 15:10:52.406380487 +0000 UTC m=+139.561124475"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.408797 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.414664 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" event={"ID":"39725dbb-08de-484d-86c4-0c99c39f0d07","Type":"ContainerStarted","Data":"00a52eea08e7de0d0a991f51a72108a29ceb00d57f8e4d8446a1eb4068248ab8"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.430120 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" event={"ID":"60e7255c-ae49-4977-94a1-1584892429cb","Type":"ContainerStarted","Data":"3a7f3fe2e03e79835ddaac458ca2faa608ccce1c34eaf6e6a66520861e5aec50"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.445376 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" event={"ID":"297ecdc1-e23a-469b-a1f5-907876ecdfaa","Type":"ContainerStarted","Data":"290aec6c98aee39d2c78a1e7bcc3612df66fa522e0c9a24475a7952995535018"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.447392 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" event={"ID":"38472de7-e7a3-4a12-8b6d-96af0d58ea1a","Type":"ContainerStarted","Data":"ca0f63ddd97372a9fb6472e4e8f116f68013e4ec3fcc09032c635c2acb19bfcf"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.475590 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" event={"ID":"54eee4e4-f460-4d64-a083-0d4b81be7561","Type":"ContainerStarted","Data":"8214fe9f271dac07201f172c6a5d4affed2f20f0d31cd58671aa1e27d71c7908"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.482318 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" event={"ID":"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136","Type":"ContainerStarted","Data":"d03804f9a0a959b02a35bbf22a3051ed143834e3972647c63184d817b96cd819"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.491079 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.497794 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vd9wq" event={"ID":"dcd694c7-53b3-4289-956c-6919aa0536af","Type":"ContainerStarted","Data":"e65bdeee9a52f9602b97b798bc598deb28dc5648487290577065bbd5be21bc45"}
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.499357 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:52.99931291 +0000 UTC m=+140.154056908 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.517632 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jj67s"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.529478 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" event={"ID":"277458f8-729e-4250-b4d0-db21713e4e48","Type":"ContainerStarted","Data":"3ab0161fa67cc249383783dccc2d8340c38f4379897f53127def1ddf0fd05998"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.533894 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" event={"ID":"f9529ab3-6789-4dbf-a404-9e255ad0a559","Type":"ContainerStarted","Data":"b2b0af82181b82a5f2da3a351cf3b2b682056db83759ebe47cdd841c6f2c1aa1"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.536640 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" event={"ID":"002d496a-6bac-491c-bf18-2b4a325f78ec","Type":"ContainerStarted","Data":"71d7b4f61056ba2bbc80a5f819f918689143cea9a1d07711e3838b8624433376"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.550286 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-pvl7k"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.558002 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gjq4j" event={"ID":"28527c1d-72f3-49b3-9a82-26aaae3ece08","Type":"ContainerStarted","Data":"d1626d7d0a302d39d2fab8bd0f36245b8ef8532ed05103996b090a2dd8c3d7af"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.576451 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.605699 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.607260 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.107243843 +0000 UTC m=+140.261987831 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.611288 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" event={"ID":"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4","Type":"ContainerStarted","Data":"c379e1443debaff54159b74ac5292f7f66c9435c831ab16f7e14d768136df020"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.611871 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.617848 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8qjxz" event={"ID":"31afed1d-4e1c-491e-b54b-a5e7e24077f1","Type":"ContainerStarted","Data":"2a714e28ae6ce133113cb2ff61af5fc8d1f870097a4f4e51fcfe162256cdea64"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.619202 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-8qjxz"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.623205 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.623285 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.640065 4716 generic.go:334] "Generic (PLEG): container finished" podID="a16e5eaf-14b0-47eb-8bc9-dfedd024c719" containerID="e6b5d8c0d9b27119c26ce0da402558bd99f57e11ff183c91a95d42eb11f77cdd" exitCode=0
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.640175 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" event={"ID":"a16e5eaf-14b0-47eb-8bc9-dfedd024c719","Type":"ContainerDied","Data":"e6b5d8c0d9b27119c26ce0da402558bd99f57e11ff183c91a95d42eb11f77cdd"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.640824 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" podStartSLOduration=117.640776973 podStartE2EDuration="1m57.640776973s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:52.637437469 +0000 UTC m=+139.792181477" watchObservedRunningTime="2025-12-09 15:10:52.640776973 +0000 UTC m=+139.795520961"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.641298 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-dxrvh" podStartSLOduration=116.641291457 podStartE2EDuration="1m56.641291457s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:52.604055884 +0000 UTC m=+139.758799872" watchObservedRunningTime="2025-12-09 15:10:52.641291457 +0000 UTC m=+139.796035445"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.654539 4716 generic.go:334] "Generic (PLEG): container finished" podID="326772eb-ce2f-4f9b-bee5-8265a8762f26" containerID="ce5cdf5653d02150a661ba75f6a6ec6f9209334af6385923a34a3aa7faf1141c" exitCode=0
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.655579 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" event={"ID":"326772eb-ce2f-4f9b-bee5-8265a8762f26","Type":"ContainerDied","Data":"ce5cdf5653d02150a661ba75f6a6ec6f9209334af6385923a34a3aa7faf1141c"}
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.655656 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.655769 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.667134 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.664678 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-8qjxz" podStartSLOduration=116.664654312 podStartE2EDuration="1m56.664654312s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:52.66318088 +0000 UTC m=+139.817924858" watchObservedRunningTime="2025-12-09 15:10:52.664654312 +0000 UTC m=+139.819398300"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.694516 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-xcs2n"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.702337 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" podStartSLOduration=116.702315856 podStartE2EDuration="1m56.702315856s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:52.698197041 +0000 UTC m=+139.852941029" watchObservedRunningTime="2025-12-09 15:10:52.702315856 +0000 UTC m=+139.857059854"
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.703420 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2rcsb"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.707971 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:52 crc kubenswrapper[4716]: W1209 15:10:52.709051 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9bc42122_0f65_4008_b335_5539ebd4ad62.slice/crio-5611ac0f8943b218090acd0339c75a26e5dd963577466d7bce0f82d808e51ce3 WatchSource:0}: Error finding container 5611ac0f8943b218090acd0339c75a26e5dd963577466d7bce0f82d808e51ce3: Status 404 returned error can't find the container with id 5611ac0f8943b218090acd0339c75a26e5dd963577466d7bce0f82d808e51ce3
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.709389 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.209360774 +0000 UTC m=+140.364104762 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.715586 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.716879 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.216855514 +0000 UTC m=+140.371599652 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: W1209 15:10:52.732097 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode70d414d_6ada_448f_8047_73667b53d329.slice/crio-1c77233140ec2d81556127571dabb13e2ab00e55552c3d69c66147b6455191fd WatchSource:0}: Error finding container 1c77233140ec2d81556127571dabb13e2ab00e55552c3d69c66147b6455191fd: Status 404 returned error can't find the container with id 1c77233140ec2d81556127571dabb13e2ab00e55552c3d69c66147b6455191fd
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.819314 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.819477 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.319445017 +0000 UTC m=+140.474189015 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.819535 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.820030 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.320016233 +0000 UTC m=+140.474760221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.839389 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.857103 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg"]
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.921204 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:52 crc kubenswrapper[4716]: E1209 15:10:52.921788 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.421735581 +0000 UTC m=+140.576479569 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.942489 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv"
Dec 09 15:10:52 crc kubenswrapper[4716]: W1209 15:10:52.954174 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5f3bc47_b766_47d3_b801_01f31edfe5ab.slice/crio-b996de55164f8a35b2b2c971077babbdccfbd3c57a6164f08f5b5785999c2380 WatchSource:0}: Error finding container b996de55164f8a35b2b2c971077babbdccfbd3c57a6164f08f5b5785999c2380: Status 404 returned error can't find the container with id b996de55164f8a35b2b2c971077babbdccfbd3c57a6164f08f5b5785999c2380
Dec 09 15:10:52 crc kubenswrapper[4716]: I1209 15:10:52.982022 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.025856 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.029259 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.529237763 +0000 UTC m=+140.683981741 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.141075 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.141782 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.641751214 +0000 UTC m=+140.796495212 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.142346 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.144242 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.644218653 +0000 UTC m=+140.798962641 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.248319 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.248720 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.74870008 +0000 UTC m=+140.903444068 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.349922 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.350615 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.850574743 +0000 UTC m=+141.005318791 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.453345 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.453795 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:53.953772384 +0000 UTC m=+141.108516382 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.557136 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.557530 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.05751399 +0000 UTC m=+141.212257978 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.661429 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.661957 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.161937365 +0000 UTC m=+141.316681353 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.762976 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.766551 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" event={"ID":"d13b21e2-5103-4d54-ae52-1d7344e8168c","Type":"ContainerStarted","Data":"8bd8326d3bc6657283c99b46cdbf6da8e19253dcb7a1c9dc20a93713d1406994"}
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.775643 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.275604319 +0000 UTC m=+141.430348307 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.802760 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" event={"ID":"a5f3bc47-b766-47d3-b801-01f31edfe5ab","Type":"ContainerStarted","Data":"b996de55164f8a35b2b2c971077babbdccfbd3c57a6164f08f5b5785999c2380"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.850970 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" event={"ID":"07bbf222-b692-4ffb-87e7-32cd55c110b9","Type":"ContainerStarted","Data":"b07e9f4a5426911d2f9bb54d1ae6124481558f99d9a18ce85c8770936a3af62c"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.853861 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-w9bdm" event={"ID":"e432b91b-278e-4a2f-81cc-4b7983a789da","Type":"ContainerStarted","Data":"233a0dc1d679c50c3f3c530c937aa0d2f7f2d5873cba1cb5e75280f039a31c26"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.864452 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.864906 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.364882899 +0000 UTC m=+141.519626887 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.884572 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" event={"ID":"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8","Type":"ContainerStarted","Data":"75f3f3313463862f08e41f6493b57ffe71100fd8e888f7604d750a9f3696734e"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.921921 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" event={"ID":"277458f8-729e-4250-b4d0-db21713e4e48","Type":"ContainerStarted","Data":"3adee94494bc507e647b5370b1c25097c161fd7b49fc504b08d076bf7ea81fbd"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.957185 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" event={"ID":"38472de7-e7a3-4a12-8b6d-96af0d58ea1a","Type":"ContainerStarted","Data":"8a48d1ed19e6d0001f60822c7f69ad993425d457f621afee463c8c58cd930b82"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.969788 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:53 crc kubenswrapper[4716]: E1209 15:10:53.972494 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.472476993 +0000 UTC m=+141.627220981 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.980917 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" event={"ID":"628cc532-1e6f-4a8e-a535-60d65177f38b","Type":"ContainerStarted","Data":"a7cb3e673d4782c03bba8f7b1950ed253543b2466229b272d0b0e3002136bfb9"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.980991 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" event={"ID":"628cc532-1e6f-4a8e-a535-60d65177f38b","Type":"ContainerStarted","Data":"a8fdaad503ebd72bfbf62a5ceb6a3e017a4e0ad3905139f5e9064ed448924f80"}
Dec 09 15:10:53 crc kubenswrapper[4716]: I1209 15:10:53.981419 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v"
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.040958 4716 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-j9v7v container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:8443/healthz\": dial tcp 10.217.0.43:8443: connect: connection refused" start-of-body=
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.041317 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" podUID="628cc532-1e6f-4a8e-a535-60d65177f38b" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.43:8443/healthz\": dial tcp 10.217.0.43:8443: connect: connection refused"
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.067457 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-vd9wq" event={"ID":"dcd694c7-53b3-4289-956c-6919aa0536af","Type":"ContainerStarted","Data":"46c6f79fbbdda0b329b2f9ae99bb89eb4181b866e7a7f316284b00bf30f3b3de"}
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.070286 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-vd9wq"
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.070400 4716 patch_prober.go:28] interesting pod/console-operator-58897d9998-vd9wq container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/readyz\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body=
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.070450 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-vd9wq" podUID="dcd694c7-53b3-4289-956c-6919aa0536af" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.33:8443/readyz\": dial tcp 10.217.0.33:8443: connect: connection refused"
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.073106 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.079081 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.579036968 +0000 UTC m=+141.733780966 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.084954 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.085493 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.585474748 +0000 UTC m=+141.740218736 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.176127 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" event={"ID":"b7aaeb25-0a0e-4487-be02-0d847555a3ea","Type":"ContainerStarted","Data":"64a4a2dba5bc30973c95f278503c489d7102a8a1e5f3847556cddca9f889cf66"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.177692 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" event={"ID":"93712aef-1f09-4361-922f-844e0f5438b6","Type":"ContainerStarted","Data":"b00851d48bb7a3d140c93cd0c8357f942a4c89639af30f94c2f8d2bea54ad840"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.191582 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.192924 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.692904127 +0000 UTC m=+141.847648115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.278951 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" event={"ID":"60e7255c-ae49-4977-94a1-1584892429cb","Type":"ContainerStarted","Data":"6fd0289479eba51dfd6f72acc291f439cfaa8d7230744cfe2ffe7c529e602bf3"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.294687 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.295752 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-09 15:10:54.795739428 +0000 UTC m=+141.950483416 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.296451 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" event={"ID":"54eee4e4-f460-4d64-a083-0d4b81be7561","Type":"ContainerStarted","Data":"9a885102bbe32b1fce806fd044c8dde25ca495889a4d88d6d3454ced30cea805"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.300746 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-xcs2n" event={"ID":"a18804a5-1680-4833-b3f6-1f02584286e1","Type":"ContainerStarted","Data":"42f130d8cf664dd4d4b811e350d244aef63dba0d8d31ea3aa10191e51e44958a"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.338652 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" event={"ID":"62b9c370-77f9-489c-b34f-6ea6a45e4245","Type":"ContainerStarted","Data":"f57820e56ce138449b238165a83340294cd40e6eb697fac6fd8aa7d894cc984c"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.338714 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" event={"ID":"62b9c370-77f9-489c-b34f-6ea6a45e4245","Type":"ContainerStarted","Data":"c83b886ced0dd14fd9f9cb1ad4740d1003acb51b7577c00a6aaaf888616b688a"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.377797 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" event={"ID":"6e905b55-fbc3-4cd8-b1cf-5092c2421dc0","Type":"ContainerStarted","Data":"b2cf27e56a27365ca6747a052006dad1e2a81dcf42af1a8e465afb555ddbcadc"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.397464 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.399215 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:54.899194416 +0000 UTC m=+142.053938394 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.408968 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" event={"ID":"39725dbb-08de-484d-86c4-0c99c39f0d07","Type":"ContainerStarted","Data":"2ac4d78f5cfef22460a8f1e8334640733738d86cdd8180834b8b26073ddb5b93"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.444454 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" event={"ID":"297ecdc1-e23a-469b-a1f5-907876ecdfaa","Type":"ContainerStarted","Data":"b0f098e87878a752440bcafea1f1ee435848920842d8b8ede83046afa8ce46f2"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.458578 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" event={"ID":"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136","Type":"ContainerStarted","Data":"46b9624cf20c3eb172db360b82faae2685bf8813cd8eb9eb5d377671ea32faf3"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.464169 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-pvl7k" event={"ID":"103c6227-5dc6-4322-ae14-201ad9e08295","Type":"ContainerStarted","Data":"3f1314c7db33cc08e0b7e51d9218ae44f56deebcb39aa8d908cc039b6ec61a24"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.513088 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" event={"ID":"a5624b6f-fed8-4b28-bc4d-8143da4a19de","Type":"ContainerStarted","Data":"01057b49c4c2ee496f9593099813cdb2eb58bf3f34b093999416bd6503af6fe2"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.515116 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.518185 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.018167708 +0000 UTC m=+142.172911766 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.567284 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" event={"ID":"9bc42122-0f65-4008-b335-5539ebd4ad62","Type":"ContainerStarted","Data":"5611ac0f8943b218090acd0339c75a26e5dd963577466d7bce0f82d808e51ce3"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.609831 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.616784 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.618396 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.118367805 +0000 UTC m=+142.273111793 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.619214 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:10:54 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:10:54 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:10:54 crc kubenswrapper[4716]: healthz check failed Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.619266 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.677029 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" event={"ID":"4a51c95a-3cda-4cd7-baff-6f802d483286","Type":"ContainerStarted","Data":"6b29dc87e9376c2b8148409f5619f04a2b3320f4cb31403759003d74ddd6751f"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.689195 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" event={"ID":"e70d414d-6ada-448f-8047-73667b53d329","Type":"ContainerStarted","Data":"575598ed71ca7173ffff0bebc39c1616e232156375d26633330c14710401b6ef"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.689249 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" event={"ID":"e70d414d-6ada-448f-8047-73667b53d329","Type":"ContainerStarted","Data":"1c77233140ec2d81556127571dabb13e2ab00e55552c3d69c66147b6455191fd"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.722151 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.722607 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" event={"ID":"c4635fa5-b3e0-458f-80ef-4f44cc8097fa","Type":"ContainerStarted","Data":"a317203ad58747520249cc7aeca552c0558969dd33a91a8fca1b6043f32b554a"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.722681 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" event={"ID":"c4635fa5-b3e0-458f-80ef-4f44cc8097fa","Type":"ContainerStarted","Data":"8fbebafe4306d1bb5f9f94aeb18c7412b99c78a68cf7c2d92d266bd945764f11"} Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.723791 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.223770537 +0000 UTC m=+142.378514725 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.734609 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns" event={"ID":"6995d782-f9c9-4e5a-95a6-07ac426ad59c","Type":"ContainerStarted","Data":"ed0874da4b7166b9834ec49a37ebd1b22ad077a8484d475a88dd3c0f8415da12"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.734714 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns" event={"ID":"6995d782-f9c9-4e5a-95a6-07ac426ad59c","Type":"ContainerStarted","Data":"c498e35c6947a05f0e54840921c879211a4013e3428b2b78684da3681aa13064"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.744904 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" event={"ID":"8e0c79e5-04c3-4242-9472-0cb67c34b499","Type":"ContainerStarted","Data":"a4bd52af2627d0fdaa7912ece7f1ef16690a42bc345f1501ef0fece4bc24c8fe"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.744961 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" event={"ID":"8e0c79e5-04c3-4242-9472-0cb67c34b499","Type":"ContainerStarted","Data":"503b3dbc558c7ae803355afe7671420c10a756a326039691a5f9737206244735"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.798121 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" event={"ID":"f9529ab3-6789-4dbf-a404-9e255ad0a559","Type":"ContainerStarted","Data":"dc17e8c4106c163684d8e87666dc21b38fc02f4dd470f6d46590e2371f93a959"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.825647 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.827301 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.327276036 +0000 UTC m=+142.482020044 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.844462 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" event={"ID":"a25d26a9-7c6b-455e-9b8d-cc2fba08c576","Type":"ContainerStarted","Data":"ec635134830b3623a11ff2c30c911857e13239d4c777b5248341d736a634afc0"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.845786 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.862636 4716 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2rcsb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.862686 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.873147 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-hcmms" podStartSLOduration=118.873123801 podStartE2EDuration="1m58.873123801s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:54.862288667 +0000 UTC m=+142.017032655" watchObservedRunningTime="2025-12-09 15:10:54.873123801 +0000 UTC m=+142.027867789" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.891672 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gjq4j" event={"ID":"28527c1d-72f3-49b3-9a82-26aaae3ece08","Type":"ContainerStarted","Data":"9ae2ed67ca58167dc4e07c205cae3561816b9047662e5fe87798d0967371b271"} Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.895335 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.895384 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:10:54 crc kubenswrapper[4716]: I1209 15:10:54.933318 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:54 crc kubenswrapper[4716]: E1209 15:10:54.941922 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.441908017 +0000 UTC m=+142.596652005 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.030364 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-jj67s" podStartSLOduration=119.030330474 podStartE2EDuration="1m59.030330474s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:54.932146274 +0000 UTC m=+142.086890282" watchObservedRunningTime="2025-12-09 15:10:55.030330474 +0000 UTC m=+142.185074462" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.035132 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.035795 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-qwkjm" podStartSLOduration=119.035658923 podStartE2EDuration="1m59.035658923s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.005732385 +0000 UTC m=+142.160476373" watchObservedRunningTime="2025-12-09 15:10:55.035658923 +0000 UTC m=+142.190402911" Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.049849 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.54982174 +0000 UTC m=+142.704565728 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.050077 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.050776 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.550758366 +0000 UTC m=+142.705502354 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.154417 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.155011 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.654984886 +0000 UTC m=+142.809728874 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.159807 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-w6p7n" podStartSLOduration=119.15977664 podStartE2EDuration="1m59.15977664s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.133865624 +0000 UTC m=+142.288609622" watchObservedRunningTime="2025-12-09 15:10:55.15977664 +0000 UTC m=+142.314520628" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.242534 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-vd9wq" podStartSLOduration=119.242503817 podStartE2EDuration="1m59.242503817s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.239239746 +0000 UTC m=+142.393983744" watchObservedRunningTime="2025-12-09 15:10:55.242503817 +0000 UTC m=+142.397247805" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.258910 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.259484 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.759465942 +0000 UTC m=+142.914209930 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.322854 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c74ns" podStartSLOduration=119.322824767 podStartE2EDuration="1m59.322824767s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.320862242 +0000 UTC m=+142.475606240" watchObservedRunningTime="2025-12-09 15:10:55.322824767 +0000 UTC m=+142.477568765" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.360400 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.365146 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.865095541 +0000 UTC m=+143.019839529 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.466949 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.467598 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:55.967578271 +0000 UTC m=+143.122322259 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.524604 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-w9bdm" podStartSLOduration=119.524576558 podStartE2EDuration="1m59.524576558s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.458077545 +0000 UTC m=+142.612821533" watchObservedRunningTime="2025-12-09 15:10:55.524576558 +0000 UTC m=+142.679320546" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.568046 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.568476 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.068456467 +0000 UTC m=+143.223200455 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.609370 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" podStartSLOduration=119.609329712 podStartE2EDuration="1m59.609329712s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.526508382 +0000 UTC m=+142.681252370" watchObservedRunningTime="2025-12-09 15:10:55.609329712 +0000 UTC m=+142.764073700" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.609575 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-k9b7p" podStartSLOduration=119.609568378 podStartE2EDuration="1m59.609568378s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.609149367 +0000 UTC m=+142.763893355" watchObservedRunningTime="2025-12-09 15:10:55.609568378 +0000 UTC m=+142.764312366" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.622839 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:10:55 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:10:55 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:10:55 crc kubenswrapper[4716]: healthz check failed Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.622917 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.661551 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-z4542" podStartSLOduration=119.661503013 podStartE2EDuration="1m59.661503013s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.654564319 +0000 UTC m=+142.809308307" watchObservedRunningTime="2025-12-09 15:10:55.661503013 +0000 UTC m=+142.816247001" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.670139 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:55 
crc kubenswrapper[4716]: E1209 15:10:55.670590 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.170572137 +0000 UTC m=+143.325316125 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.707313 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" podStartSLOduration=119.707283206 podStartE2EDuration="1m59.707283206s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.705328961 +0000 UTC m=+142.860072949" watchObservedRunningTime="2025-12-09 15:10:55.707283206 +0000 UTC m=+142.862027194" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.749151 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cjqcg" podStartSLOduration=119.749122027 podStartE2EDuration="1m59.749122027s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.745456655 +0000 UTC m=+142.900200673" watchObservedRunningTime="2025-12-09 15:10:55.749122027 +0000 UTC m=+142.903866015" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.771565 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.772106 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.272084011 +0000 UTC m=+143.426827999 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.822846 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-b25nc" podStartSLOduration=119.822821482 podStartE2EDuration="1m59.822821482s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.821251588 +0000 UTC m=+142.975995576" watchObservedRunningTime="2025-12-09 15:10:55.822821482 +0000 UTC m=+142.977565470" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.854027 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" podStartSLOduration=119.854000235 podStartE2EDuration="1m59.854000235s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.850514837 +0000 UTC m=+143.005258845" watchObservedRunningTime="2025-12-09 15:10:55.854000235 +0000 UTC m=+143.008744223" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.872983 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.873545 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.373528762 +0000 UTC m=+143.528272750 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.921563 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-2xfp8" podStartSLOduration=119.921541307 podStartE2EDuration="1m59.921541307s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.920308142 +0000 UTC m=+143.075052130" watchObservedRunningTime="2025-12-09 15:10:55.921541307 +0000 UTC m=+143.076285295" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.976460 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" podStartSLOduration=119.976422034 podStartE2EDuration="1m59.976422034s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:55.971020803 +0000 UTC m=+143.125764801" watchObservedRunningTime="2025-12-09 15:10:55.976422034 +0000 UTC m=+143.131166022" Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.976964 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:55 crc kubenswrapper[4716]: E1209 15:10:55.977439 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.477418482 +0000 UTC m=+143.632162470 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:55 crc kubenswrapper[4716]: I1209 15:10:55.993730 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" event={"ID":"a16e5eaf-14b0-47eb-8bc9-dfedd024c719","Type":"ContainerStarted","Data":"8e175f1d51a6509775995bdf00966f380a114f420da1557d339f6eba6a4f46cc"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.014946 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-gjq4j" podStartSLOduration=9.014919153 podStartE2EDuration="9.014919153s" podCreationTimestamp="2025-12-09 15:10:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.012135615 +0000 UTC m=+143.166879633" watchObservedRunningTime="2025-12-09 15:10:56.014919153 +0000 UTC m=+143.169663141" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.021180 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" event={"ID":"b7aaeb25-0a0e-4487-be02-0d847555a3ea","Type":"ContainerStarted","Data":"c957aef10c733a96d8707da610fd954a9288c65ae7b2ee5868dc0d82bee57811"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.021871 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.032338 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" event={"ID":"38472de7-e7a3-4a12-8b6d-96af0d58ea1a","Type":"ContainerStarted","Data":"70b0af6dadca3195a2442f00ddf7992fadea65f632a0ac27abd771e9c9559da2"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.047924 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" event={"ID":"a5f3bc47-b766-47d3-b801-01f31edfe5ab","Type":"ContainerStarted","Data":"30e4cdaf0ae9d69c39eb39a0fdadbe502cb58cf41c552f6d0315601ce45de971"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.048825 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.067765 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" event={"ID":"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8","Type":"ContainerStarted","Data":"2461e06ae162ce4740041f8608c92cb32a3f286515ee61e30deaae6039f1a6ec"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.067867 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" event={"ID":"4ff467d8-21a2-4f5f-a9eb-37800bfc01d8","Type":"ContainerStarted","Data":"62e3f260c933d0fd8c402c220a2caf74e4668fdaf35a357b432830806cc594d5"} Dec 09 15:10:56 crc 
kubenswrapper[4716]: I1209 15:10:56.070161 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-xcs2n" event={"ID":"a18804a5-1680-4833-b3f6-1f02584286e1","Type":"ContainerStarted","Data":"1d83e6859e0597847b3e1e5b00fa350dd9d519059edb59d6a0690bdccb72fb26"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.081260 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.083714 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.583666358 +0000 UTC m=+143.738410346 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.100036 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-447g6" event={"ID":"62b9c370-77f9-489c-b34f-6ea6a45e4245","Type":"ContainerStarted","Data":"07e967dbae1101c550ac47c08ed170aa5a11c95842eaf0d73cfbb5c4ba1cc9f9"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.116898 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" event={"ID":"a25d26a9-7c6b-455e-9b8d-cc2fba08c576","Type":"ContainerStarted","Data":"977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.118740 4716 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2rcsb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.118793 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.138509 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" event={"ID":"39725dbb-08de-484d-86c4-0c99c39f0d07","Type":"ContainerStarted","Data":"1a6c222be79bdc45857514c42b8e5fa8285f11987405a8b40def5b2a31f5f765"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.177967 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-pvl7k" 
event={"ID":"103c6227-5dc6-4322-ae14-201ad9e08295","Type":"ContainerStarted","Data":"e531a665d40530ffcf8f6d2fbbd40d9aa9975cbfff87da1ab415b80a6d7d82c1"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.178865 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-pvl7k" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.182842 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" podStartSLOduration=120.182808415 podStartE2EDuration="2m0.182808415s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.080551881 +0000 UTC m=+143.235295869" watchObservedRunningTime="2025-12-09 15:10:56.182808415 +0000 UTC m=+143.337552403" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.189455 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.189582 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.689560304 +0000 UTC m=+143.844304292 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.192546 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.194054 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.69404287 +0000 UTC m=+143.848786858 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.215028 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" event={"ID":"07bbf222-b692-4ffb-87e7-32cd55c110b9","Type":"ContainerStarted","Data":"1ec77966bc91af8d4813e56151263dc408652b444922811ba7d8279b08e5a0a8"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.216377 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.234728 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" podStartSLOduration=120.234698639 podStartE2EDuration="2m0.234698639s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.181952251 +0000 UTC m=+143.336696239" watchObservedRunningTime="2025-12-09 15:10:56.234698639 +0000 UTC m=+143.389442637" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.236868 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" event={"ID":"f9529ab3-6789-4dbf-a404-9e255ad0a559","Type":"ContainerStarted","Data":"db1962655c5aaa26037468b7903b658307115668b8b293dce081ed50312d7312"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.257250 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.264010 4716 generic.go:334] "Generic (PLEG): container finished" podID="f08b8fc4-fca7-4a7e-b42d-97f41d3cd136" containerID="46b9624cf20c3eb172db360b82faae2685bf8813cd8eb9eb5d377671ea32faf3" exitCode=0 Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.264731 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" event={"ID":"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136","Type":"ContainerDied","Data":"46b9624cf20c3eb172db360b82faae2685bf8813cd8eb9eb5d377671ea32faf3"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.264815 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" event={"ID":"f08b8fc4-fca7-4a7e-b42d-97f41d3cd136","Type":"ContainerStarted","Data":"451a70591a849373f5083d6dd5c48e1dc35b2081d0658181cafcf7fe5033d462"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.264887 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.309749 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.311189 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.81116749 +0000 UTC m=+143.965911478 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.316349 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" event={"ID":"9bc42122-0f65-4008-b335-5539ebd4ad62","Type":"ContainerStarted","Data":"fb3e7a5563f727d6769c45c121c129df67cf238fa4eb35b12006dd6e766e9ee4"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.330966 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-h9txk" podStartSLOduration=120.330929164 podStartE2EDuration="2m0.330929164s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.233251618 +0000 UTC m=+143.387995606" watchObservedRunningTime="2025-12-09 15:10:56.330929164 +0000 UTC m=+143.485673152" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.348467 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hswzp" podStartSLOduration=120.348435554 podStartE2EDuration="2m0.348435554s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.31435629 +0000 UTC m=+143.469100288" watchObservedRunningTime="2025-12-09 15:10:56.348435554 +0000 UTC m=+143.503179542" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.373261 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" event={"ID":"54eee4e4-f460-4d64-a083-0d4b81be7561","Type":"ContainerStarted","Data":"2ce2f0d41a8cd68ed076e37dae24b228fe4072df3371a60322a5f67adba5c9e6"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.395804 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" event={"ID":"326772eb-ce2f-4f9b-bee5-8265a8762f26","Type":"ContainerStarted","Data":"0332f3b0fb403f4a726fe0acb4a0499f3b0160af5af54d60331c9974344001d6"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.397819 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" 
event={"ID":"d13b21e2-5103-4d54-ae52-1d7344e8168c","Type":"ContainerStarted","Data":"50c40afd3f5463599fc0fe5b7b35de537572c17637ce28078c5c17375c153004"} Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.419265 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.420575 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:56.920562055 +0000 UTC m=+144.075306043 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.445725 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.445784 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.472550 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-xcs2n" podStartSLOduration=9.472522129 podStartE2EDuration="9.472522129s" podCreationTimestamp="2025-12-09 15:10:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.382781216 +0000 UTC m=+143.537525394" watchObservedRunningTime="2025-12-09 15:10:56.472522129 +0000 UTC m=+143.627266117" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.475564 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-62zfg" podStartSLOduration=120.475551684 podStartE2EDuration="2m0.475551684s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.463167697 +0000 UTC m=+143.617911685" watchObservedRunningTime="2025-12-09 15:10:56.475551684 +0000 UTC m=+143.630295682" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.493891 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-j9v7v" Dec 09 15:10:56 crc kubenswrapper[4716]: 
I1209 15:10:56.522613 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.523108 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.023078755 +0000 UTC m=+144.177822753 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.556728 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-jk4jp" podStartSLOduration=120.556706957 podStartE2EDuration="2m0.556706957s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.554120545 +0000 UTC m=+143.708864553" watchObservedRunningTime="2025-12-09 15:10:56.556706957 +0000 UTC m=+143.711450965" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.621178 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:10:56 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:10:56 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:10:56 crc kubenswrapper[4716]: healthz check failed Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.621248 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.631000 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.635971 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.135947547 +0000 UTC m=+144.290691755 (durationBeforeRetry 500ms). 
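Each failed volume operation in this log is parked by the kubelet's pending-operations tracker with a "No retries permitted until … (durationBeforeRetry 500ms)" deadline. In this excerpt the delay stays at the 500ms initial value on every attempt; the tracker can also grow the delay on repeated failures of the same operation. A rough sketch of that retry gate, assuming a doubling backoff with a cap — a simplification, not the nestedpendingoperations implementation:

	package main

	import (
		"fmt"
		"time"
	)

	// opBackoff gates retries of a named volume operation.
	type opBackoff struct {
		delay    time.Duration // current durationBeforeRetry
		maxDelay time.Duration
		notUntil time.Time // no retries permitted until this instant
	}

	// fail records a failure and pushes the next permitted retry out.
	func (b *opBackoff) fail(now time.Time) {
		if b.delay == 0 {
			b.delay = 500 * time.Millisecond // initial value observed in the log
		} else if b.delay < b.maxDelay {
			b.delay *= 2 // assumed exponential growth, capped below
			if b.delay > b.maxDelay {
				b.delay = b.maxDelay
			}
		}
		b.notUntil = now.Add(b.delay)
	}

	func (b *opBackoff) allowed(now time.Time) bool { return !now.Before(b.notUntil) }

	func main() {
		b := &opBackoff{maxDelay: 2 * time.Minute}
		now := time.Now()
		b.fail(now)
		fmt.Printf("no retries permitted until %s (durationBeforeRetry %s)\n",
			b.notUntil.Format(time.RFC3339Nano), b.delay)
		fmt.Println("retry allowed immediately?", b.allowed(now)) // false
	}
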
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.677112 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-qj9vv" podStartSLOduration=120.677081609 podStartE2EDuration="2m0.677081609s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.661546484 +0000 UTC m=+143.816290472" watchObservedRunningTime="2025-12-09 15:10:56.677081609 +0000 UTC m=+143.831825597" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.732541 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.733011 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.232989985 +0000 UTC m=+144.387733973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.827805 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f8rnc"] Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.829063 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.834389 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.834923 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.334900999 +0000 UTC m=+144.489644987 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: W1209 15:10:56.835139 4716 reflector.go:561] object-"openshift-marketplace"/"community-operators-dockercfg-dmngl": failed to list *v1.Secret: secrets "community-operators-dockercfg-dmngl" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.835239 4716 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"community-operators-dockercfg-dmngl\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"community-operators-dockercfg-dmngl\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.844376 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-pvl7k" podStartSLOduration=10.844342214 podStartE2EDuration="10.844342214s" podCreationTimestamp="2025-12-09 15:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:56.841953627 +0000 UTC m=+143.996697615" watchObservedRunningTime="2025-12-09 15:10:56.844342214 +0000 UTC m=+143.999086202" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.935598 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.936040 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg5hh\" (UniqueName: \"kubernetes.io/projected/6e8a9e79-4b63-4c3f-968c-9524e682af80-kube-api-access-hg5hh\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.936081 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-catalog-content\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.936140 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-utilities\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " 
pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:56 crc kubenswrapper[4716]: E1209 15:10:56.936315 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.43629353 +0000 UTC m=+144.591037518 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.982367 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6grj2"] Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.983638 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:56 crc kubenswrapper[4716]: I1209 15:10:56.985472 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f8rnc"] Dec 09 15:10:57 crc kubenswrapper[4716]: W1209 15:10:57.013158 4716 reflector.go:561] object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g": failed to list *v1.Secret: secrets "certified-operators-dockercfg-4rs5g" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.013247 4716 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-4rs5g\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"certified-operators-dockercfg-4rs5g\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.021393 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-ktjk2" podStartSLOduration=121.021362292 podStartE2EDuration="2m1.021362292s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:57.018137642 +0000 UTC m=+144.172881650" watchObservedRunningTime="2025-12-09 15:10:57.021362292 +0000 UTC m=+144.176106280" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.037097 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-utilities\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.037211 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg5hh\" (UniqueName: 
\"kubernetes.io/projected/6e8a9e79-4b63-4c3f-968c-9524e682af80-kube-api-access-hg5hh\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.037230 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-catalog-content\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.037264 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.037590 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.537577416 +0000 UTC m=+144.692321404 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.038285 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-utilities\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.038553 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-catalog-content\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.049554 4716 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-x572x container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.049659 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" podUID="a5f3bc47-b766-47d3-b801-01f31edfe5ab" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.37:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 15:10:57 crc kubenswrapper[4716]: 
I1209 15:10:57.115895 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6grj2"] Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.137967 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.138730 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.638694319 +0000 UTC m=+144.793438307 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.138791 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-catalog-content\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.138841 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.138975 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxb4v\" (UniqueName: \"kubernetes.io/projected/02be1451-6780-479c-ab94-37503fea3645-kube-api-access-wxb4v\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.139049 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-utilities\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.139285 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.639261085 +0000 UTC m=+144.794005073 (durationBeforeRetry 500ms). 
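The "Probe failed" lines above (connection refused for marketplace-operator on 10.217.0.38:8080, a client timeout for packageserver on 10.217.0.37:5443) are ordinary HTTP readiness probes: the kubelet issues a GET against the configured path and treats connection errors, timeouts, and non-success status codes as failures. A minimal Go version of such a check — illustrative; the endpoint and timeout here are placeholders, not the pods' actual probe settings:

	package main

	import (
		"fmt"
		"net/http"
		"time"
	)

	// probeHTTP returns nil if the endpoint answers within the timeout with a
	// non-error status, otherwise an error resembling the probe output in the log.
	func probeHTTP(url string, timeout time.Duration) error {
		client := &http.Client{Timeout: timeout}
		resp, err := client.Get(url)
		if err != nil {
			return fmt.Errorf("Get %q: %v", url, err) // e.g. connect: connection refused
		}
		defer resp.Body.Close()
		if resp.StatusCode >= 400 {
			return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
		}
		return nil
	}

	func main() {
		if err := probeHTTP("http://10.217.0.38:8080/healthz", time.Second); err != nil {
			fmt.Println("Readiness probe failure:", err)
		}
	}
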
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.151182 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4zqlr"] Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.152517 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.168801 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg5hh\" (UniqueName: \"kubernetes.io/projected/6e8a9e79-4b63-4c3f-968c-9524e682af80-kube-api-access-hg5hh\") pod \"community-operators-f8rnc\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.240284 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.241397 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.741359355 +0000 UTC m=+144.896103533 (durationBeforeRetry 500ms). 
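The two "failed to list *v1.Secret … no relationship found between node 'crc' and this object" warnings earlier in this window are the node authorizer at work: the kubelet may read a secret only once the authorizer's graph links that secret to a pod bound to this node, so watches started between pod scheduling and graph propagation are rejected and simply retried. A toy model of the relationship check — the types and maps here are hypothetical; the real authorizer builds its graph from API server informers:

	package main

	import "fmt"

	// nodeGraph is a toy stand-in for the node authorizer's graph:
	// node -> pods bound to it, pod -> secrets the pod references.
	type nodeGraph struct {
		podsOnNode map[string][]string
		podSecrets map[string][]string
	}

	func (g *nodeGraph) nodeCanReadSecret(node, secret string) bool {
		for _, pod := range g.podsOnNode[node] {
			for _, s := range g.podSecrets[pod] {
				if s == secret {
					return true
				}
			}
		}
		return false // "no relationship found between node and this object"
	}

	func main() {
		g := &nodeGraph{
			podsOnNode: map[string][]string{"crc": {"community-operators-f8rnc"}},
			podSecrets: map[string][]string{}, // graph not yet populated
		}
		fmt.Println(g.nodeCanReadSecret("crc", "community-operators-dockercfg-dmngl")) // false until propagation
		g.podSecrets["community-operators-f8rnc"] = []string{"community-operators-dockercfg-dmngl"}
		fmt.Println(g.nodeCanReadSecret("crc", "community-operators-dockercfg-dmngl")) // true afterwards
	}
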
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.241464 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-catalog-content\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.242001 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-catalog-content\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.242052 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxb4v\" (UniqueName: \"kubernetes.io/projected/02be1451-6780-479c-ab94-37503fea3645-kube-api-access-wxb4v\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.242156 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-utilities\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.242804 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-utilities\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.248213 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4zqlr"] Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.279528 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-vd9wq" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.284300 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"] Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.296988 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.343787 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-catalog-content\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.343844 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-utilities\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.343869 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.343959 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdlpn\" (UniqueName: \"kubernetes.io/projected/3f60d900-cfcc-4840-8f6b-cdcb043a510b-kube-api-access-mdlpn\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.344410 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.84439407 +0000 UTC m=+144.999138058 (durationBeforeRetry 500ms). 
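"No sandbox for pod can be found. Need to start a new one" marks the first step of pod startup for the new marketplace catalog pods: the kubelet asks the container runtime for a pod sandbox (network namespace, cgroups) before any user container is created, and the later ContainerStarted PLEG events report containers coming up inside that sandbox. A compressed sketch of that ordering — the runtime type below is a hypothetical stand-in, not the real CRI client:

	package main

	import "fmt"

	// runtime is a toy stand-in for the CRI runtime service.
	type runtime struct{ sandboxes map[string]bool }

	func (r *runtime) ensureSandbox(pod string) {
		if !r.sandboxes[pod] {
			fmt.Printf("No sandbox for pod can be found. Need to start a new one pod=%q\n", pod)
			r.sandboxes[pod] = true // the RunPodSandbox CRI call would happen here
		}
	}

	func (r *runtime) startContainer(pod, name string) {
		r.ensureSandbox(pod) // the sandbox always precedes its containers
		fmt.Printf("ContainerStarted pod=%q container=%q\n", pod, name)
	}

	func main() {
		rt := &runtime{sandboxes: map[string]bool{}}
		rt.startContainer("openshift-marketplace/community-operators-f8rnc", "registry-server")
	}
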
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.385693 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxb4v\" (UniqueName: \"kubernetes.io/projected/02be1451-6780-479c-ab94-37503fea3645-kube-api-access-wxb4v\") pod \"certified-operators-6grj2\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.386245 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" podStartSLOduration=121.386215332 podStartE2EDuration="2m1.386215332s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:57.325714517 +0000 UTC m=+144.480458505" watchObservedRunningTime="2025-12-09 15:10:57.386215332 +0000 UTC m=+144.540959320" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.393175 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"] Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.446265 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.446811 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-utilities\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.446885 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-utilities\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.446921 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpcnm\" (UniqueName: \"kubernetes.io/projected/1cf42d24-d670-433b-8a60-11e6cadde0dd-kube-api-access-hpcnm\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.447016 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-catalog-content\") pod 
\"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.447238 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:57.94720729 +0000 UTC m=+145.101951278 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.447397 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdlpn\" (UniqueName: \"kubernetes.io/projected/3f60d900-cfcc-4840-8f6b-cdcb043a510b-kube-api-access-mdlpn\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.447549 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-utilities\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.448824 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-catalog-content\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.447564 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-catalog-content\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.488692 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" event={"ID":"a16e5eaf-14b0-47eb-8bc9-dfedd024c719","Type":"ContainerStarted","Data":"4745ec43eccc789ab71cbdd554e18e2d53f13a0a65e3b3f7e8920385db494065"} Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.496107 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-pvl7k" event={"ID":"103c6227-5dc6-4322-ae14-201ad9e08295","Type":"ContainerStarted","Data":"f0551b3e5382232faf765cf6d16629ff1121d5db436304274fa4be4fdb47bfb5"} Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.507008 4716 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2rcsb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" start-of-body= Dec 09 15:10:57 
crc kubenswrapper[4716]: I1209 15:10:57.507111 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.38:8080/healthz\": dial tcp 10.217.0.38:8080: connect: connection refused" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.535702 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdlpn\" (UniqueName: \"kubernetes.io/projected/3f60d900-cfcc-4840-8f6b-cdcb043a510b-kube-api-access-mdlpn\") pod \"community-operators-4zqlr\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.570822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-catalog-content\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.571275 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-utilities\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.571352 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.571481 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpcnm\" (UniqueName: \"kubernetes.io/projected/1cf42d24-d670-433b-8a60-11e6cadde0dd-kube-api-access-hpcnm\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.573668 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.073613981 +0000 UTC m=+145.228357959 (durationBeforeRetry 500ms). 
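The "Observed pod startup duration" lines compute podStartSLOduration from pod creation to the first observed running time, minus time spent pulling images. Here firstStartedPulling and lastFinishedPulling are the zero time ("0001-01-01 …"), meaning no pull was recorded (images were already present), so the SLO duration equals the end-to-end duration, e.g. 2m1s for openshift-config-operator above. A small reconstruction of that arithmetic — field names are borrowed from the log; the real logic lives in pod_startup_latency_tracker.go and handles more cases:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Parse errors ignored for brevity; inputs copied from the log entries above.
		created, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", "2025-12-09 15:08:56 +0000 UTC")
		observedRunning, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", "2025-12-09 15:10:57 +0000 UTC")

		var firstStartedPulling, lastFinishedPulling time.Time // zero values: no image pull observed

		e2e := observedRunning.Sub(created)
		pull := lastFinishedPulling.Sub(firstStartedPulling) // zero here
		slo := e2e - pull

		fmt.Printf("podStartE2EDuration=%s podStartSLOduration=%s\n", e2e, slo) // 2m1s 2m1s
	}
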
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.574608 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-catalog-content\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.577435 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-utilities\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.618795 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:10:57 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:10:57 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:10:57 crc kubenswrapper[4716]: healthz check failed Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.618861 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.656518 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpcnm\" (UniqueName: \"kubernetes.io/projected/1cf42d24-d670-433b-8a60-11e6cadde0dd-kube-api-access-hpcnm\") pod \"certified-operators-bfn6b\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.674931 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.675131 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.175099314 +0000 UTC m=+145.329843312 (durationBeforeRetry 500ms). 
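The router's failing startup probe above ("[-]backend-http failed: reason withheld … [+]process-running ok … healthz check failed") shows the standard aggregated healthz format: each named check reports pass or fail, a single failing check turns the whole endpoint into HTTP 500, and failure reasons can be withheld from unauthenticated callers. A minimal handler producing that shape — illustrative; the check names are copied from the log, and check ordering here follows Go's map iteration rather than the real mux:

	package main

	import (
		"fmt"
		"net/http"
	)

	// healthz aggregates named checks into one endpoint, 500 if any check fails.
	func healthz(checks map[string]error) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			failed := false
			body := ""
			for name, err := range checks {
				if err != nil {
					failed = true
					body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
				} else {
					body += fmt.Sprintf("[+]%s ok\n", name)
				}
			}
			if failed {
				w.WriteHeader(http.StatusInternalServerError) // probe sees "statuscode: 500"
				body += "healthz check failed\n"
			}
			fmt.Fprint(w, body)
		}
	}

	func main() {
		checks := map[string]error{
			"backend-http":    fmt.Errorf("not ready"),
			"has-synced":      fmt.Errorf("not ready"),
			"process-running": nil,
		}
		http.Handle("/healthz", healthz(checks))
		// http.ListenAndServe(":8080", nil) // left commented so the sketch exits cleanly
	}
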
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.675348 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.675712 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.175703991 +0000 UTC m=+145.330447979 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.716980 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5" podStartSLOduration=121.716960196 podStartE2EDuration="2m1.716960196s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:57.708457838 +0000 UTC m=+144.863201826" watchObservedRunningTime="2025-12-09 15:10:57.716960196 +0000 UTC m=+144.871704194" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.782301 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.782556 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.282511042 +0000 UTC m=+145.437255030 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.782792 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.783251 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.283229672 +0000 UTC m=+145.437973860 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.791871 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.796302 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.796887 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.883770 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.884212 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.38419357 +0000 UTC m=+145.538937558 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:57 crc kubenswrapper[4716]: I1209 15:10:57.986013 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:57 crc kubenswrapper[4716]: E1209 15:10:57.986419 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.486402493 +0000 UTC m=+145.641146481 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.096215 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.096421 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.596395544 +0000 UTC m=+145.751139532 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.096580 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.096966 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.59694786 +0000 UTC m=+145.751691848 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.218840 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-x572x" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.218976 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.219368 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.719347618 +0000 UTC m=+145.874091596 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.321642 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.322207 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.822180369 +0000 UTC m=+145.976924367 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.332918 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" podStartSLOduration=123.332891049 podStartE2EDuration="2m3.332891049s" podCreationTimestamp="2025-12-09 15:08:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:58.330165052 +0000 UTC m=+145.484909040" watchObservedRunningTime="2025-12-09 15:10:58.332891049 +0000 UTC m=+145.487635037" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.333245 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-68bqx" podStartSLOduration=122.333238678 podStartE2EDuration="2m2.333238678s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:10:58.249907234 +0000 UTC m=+145.404651222" watchObservedRunningTime="2025-12-09 15:10:58.333238678 +0000 UTC m=+145.487982676" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.424652 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.425116 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-12-09 15:10:58.92505155 +0000 UTC m=+146.079795538 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.475080 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.476547 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.477571 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.533500 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.533979 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.033959291 +0000 UTC m=+146.188703279 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.554801 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" event={"ID":"9bc42122-0f65-4008-b335-5539ebd4ad62","Type":"ContainerStarted","Data":"c17e20809bd475fd6f8db2e86b661532347ef1aba1797c71e5946c5c6fd0adc0"} Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.633216 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:10:58 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:10:58 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:10:58 crc kubenswrapper[4716]: healthz check failed Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.633309 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.634854 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.637070 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.137041878 +0000 UTC m=+146.291786026 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.678675 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n8bgn"] Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.681168 4716 util.go:30] "No sandbox for pod can be found. 
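The repeating MountVolume.MountDevice / UnmountVolume.TearDown failures above all share one cause: the PVC is backed by the CSI driver kubevirt.io.hostpath-provisioner, which has not yet registered with this kubelet, so neither operation can construct a CSI client. Rather than blocking, the volume manager records each failure in nestedpendingoperations and schedules the next attempt after a delay (a fixed 500 ms step in these entries), which is why the same pair of errors recurs on every reconcile pass until the driver appears. A minimal Go sketch of that retry shape, with illustrative names rather than the kubelet's actual types:

    // Illustrative retry-with-delay loop for a volume operation that
    // fails until its CSI driver registers. Names are hypothetical.
    package main

    import (
    	"errors"
    	"fmt"
    	"sync/atomic"
    	"time"
    )

    var errDriverNotRegistered = errors.New(
    	"driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")

    // mountDevice stands in for attacher.MountDevice: it fails while the
    // driver is absent from the registry and succeeds afterwards.
    func mountDevice(registered *atomic.Bool) error {
    	if !registered.Load() {
    		return errDriverNotRegistered
    	}
    	return nil
    }

    func main() {
    	var registered atomic.Bool
    	// Simulate the plugin registering ~2s in, roughly what happens
    	// at 15:10:59 further down in this log.
    	go func() {
    		time.Sleep(2 * time.Second)
    		registered.Store(true)
    	}()

    	const durationBeforeRetry = 500 * time.Millisecond
    	for {
    		if err := mountDevice(&registered); err != nil {
    			fmt.Printf("failed, no retries permitted until %s: %v\n",
    				time.Now().Add(durationBeforeRetry).Format(time.RFC3339Nano), err)
    			time.Sleep(durationBeforeRetry)
    			continue
    		}
    		fmt.Println("MountVolume.MountDevice succeeded")
    		return
    	}
    }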
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.681168 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.689765 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.707855 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8bgn"]
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.746159 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-utilities\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.746338 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-catalog-content\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.765510 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdx22\" (UniqueName: \"kubernetes.io/projected/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-kube-api-access-kdx22\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.765669 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.766440 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.266411402 +0000 UTC m=+146.421155390 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.854254 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f8rnc"]
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.854325 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4zqlr"]
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.867105 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.867354 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdx22\" (UniqueName: \"kubernetes.io/projected/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-kube-api-access-kdx22\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.867513 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-utilities\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.867588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-catalog-content\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.868091 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-catalog-content\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.868170 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.368149331 +0000 UTC m=+146.522893319 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.868742 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-utilities\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.917717 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdx22\" (UniqueName: \"kubernetes.io/projected/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-kube-api-access-kdx22\") pod \"redhat-marketplace-n8bgn\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.960784 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-ckjw6"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.962643 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-ckjw6"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.970791 4716 patch_prober.go:28] interesting pod/console-f9d7485db-ckjw6 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body=
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.970871 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ckjw6" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused"
Dec 09 15:10:58 crc kubenswrapper[4716]: I1209 15:10:58.975157 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:58 crc kubenswrapper[4716]: E1209 15:10:58.975915 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.475889949 +0000 UTC m=+146.630633937 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.004294 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dsxtv"]
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.018012 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.037477 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dsxtv"]
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.081822 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:59 crc kubenswrapper[4716]: E1209 15:10:59.082019 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.581980881 +0000 UTC m=+146.736724869 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.082264 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.082301 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms8q9\" (UniqueName: \"kubernetes.io/projected/1fc78b58-085f-42f7-bec5-b28d0d2bc191-kube-api-access-ms8q9\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.082331 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-catalog-content\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.082356 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-utilities\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: E1209 15:10:59.084250 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.584234364 +0000 UTC m=+146.738978352 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
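Interleaved with the failing PVC operations, the reconciler lines show the kubelet's volume manager at work: reconciler_common.go:159 marks the start of an unmount for a volume that is mounted but no longer desired (the deleted pod 8f668bae-612b-4b75-9490-919e737c6a3b), reconciler_common.go:218 the start of a mount the new pod needs, and reconciler_common.go:245 the check that a volume is attached before mounting begins. A toy Go sketch of that desired-versus-actual loop, not the kubelet's real data structures:

    // Toy desired-state vs actual-state reconciliation in the spirit of
    // the kubelet volume manager. All names are illustrative.
    package main

    import "fmt"

    func reconcile(desired, actual map[string]bool) {
    	// Unmount volumes that are mounted but no longer desired
    	// (reconciler_common.go:159 "UnmountVolume started").
    	for vol := range actual {
    		if !desired[vol] {
    			fmt.Println("UnmountVolume started for", vol)
    			delete(actual, vol)
    		}
    	}
    	// Mount volumes that are desired but not yet mounted
    	// (reconciler_common.go:218 "MountVolume started").
    	for vol := range desired {
    		if !actual[vol] {
    			fmt.Println("MountVolume started for", vol)
    			actual[vol] = true
    		}
    	}
    }

    func main() {
    	actual := map[string]bool{"pvc-657094db": true} // still mounted for the deleted pod
    	desired := map[string]bool{"catalog-content": true, "utilities": true}
    	reconcile(desired, actual)
    	fmt.Println("actual state now:", actual)
    }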
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.121614 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8bgn"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.139698 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.140239 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.152608 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.152758 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.153171 4716 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.189358 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.189692 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms8q9\" (UniqueName: \"kubernetes.io/projected/1fc78b58-085f-42f7-bec5-b28d0d2bc191-kube-api-access-ms8q9\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.189731 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-catalog-content\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.189762 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-utilities\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.190345 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-utilities\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: E1209 15:10:59.192458 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.692432635 +0000 UTC m=+146.847176623 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.197893 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.204745 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-catalog-content\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.249515 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms8q9\" (UniqueName: \"kubernetes.io/projected/1fc78b58-085f-42f7-bec5-b28d0d2bc191-kube-api-access-ms8q9\") pod \"redhat-marketplace-dsxtv\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.274117 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.274188 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.274906 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body=
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.274932 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.291697 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.291851 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.291902 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.291942 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.291971 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:59 crc kubenswrapper[4716]: E1209 15:10:59.293402 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.793387612 +0000 UTC m=+146.948131600 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ht662" (UID: "a9146318-442c-453e-977e-802cdaa5532a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.304879 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.310306 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.317428 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.324603 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.337359 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.340115 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.343639 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"]
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.390749 4716 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-09T15:10:59.153203306Z","Handler":null,"Name":""}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.394120 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.399962 4716 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.400026 4716 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Dec 09 15:10:59 crc kubenswrapper[4716]: E1209 15:10:59.400322 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-09 15:10:59.900296807 +0000 UTC m=+147.055040795 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.442316 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dsxtv"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.503098 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.510073 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6grj2"]
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.537843 4716 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
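This is the turning point of the section: the plugin watcher sees the registration socket appear under /var/lib/kubelet/plugins_registry, RegisterPlugin fires, and csi_plugin.go validates and registers kubevirt.io.hostpath-provisioner at its csi.sock endpoint. The csi_attacher.go:380 line then shows why the long-failing MountDevice suddenly succeeds: this driver does not advertise the STAGE_UNSTAGE_VOLUME capability, so the staging step is skipped outright. A Go sketch of that capability gate, with illustrative types rather than the real CSI API surface:

    // Sketch of the STAGE_UNSTAGE_VOLUME gate seen at csi_attacher.go:380.
    // Constant and types are illustrative, not the real CSI API.
    package main

    import "fmt"

    type capability string

    const stageUnstageVolume capability = "STAGE_UNSTAGE_VOLUME"

    type driver struct {
    	name string
    	caps map[capability]bool
    }

    // mountDevice stages the volume only if the driver advertises
    // STAGE_UNSTAGE_VOLUME; otherwise staging is skipped and the
    // operation trivially succeeds.
    func mountDevice(d driver, volumeID string) error {
    	if !d.caps[stageUnstageVolume] {
    		fmt.Println("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
    		return nil
    	}
    	fmt.Println("staging", volumeID, "to the global mount path")
    	return nil // a NodeStageVolume RPC would go here
    }

    func main() {
    	hpp := driver{name: "kubevirt.io.hostpath-provisioner", caps: map[capability]bool{}}
    	_ = mountDevice(hpp, "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8")
    }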
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.537903 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.547539 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.592954 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v"
Dec 09 15:10:59 crc kubenswrapper[4716]: W1209 15:10:59.629186 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02be1451_6780_479c_ab94_37503fea3645.slice/crio-dfd9e4ea3313ea4f7ef2ea895327121a5ebc8113f1cf595354434be6c3b40514 WatchSource:0}: Error finding container dfd9e4ea3313ea4f7ef2ea895327121a5ebc8113f1cf595354434be6c3b40514: Status 404 returned error can't find the container with id dfd9e4ea3313ea4f7ef2ea895327121a5ebc8113f1cf595354434be6c3b40514
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.637257 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 09 15:10:59 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld
Dec 09 15:10:59 crc kubenswrapper[4716]: [+]process-running ok
Dec 09 15:10:59 crc kubenswrapper[4716]: healthz check failed
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.637320 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.638644 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerStarted","Data":"66ba1daa9663c12a71673817977146a8bdb57ebfb215e5a2dddd3b4235f1963d"}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.641290 4716 generic.go:334] "Generic (PLEG): container finished" podID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerID="f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c" exitCode=0
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.641348 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerDied","Data":"f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c"}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.641370 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerStarted","Data":"19decfa94e89fd28925b7d57f995842e3e2e383f71554199d025c9064a694f85"}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.653286 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerStarted","Data":"8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552"}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.653364 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerStarted","Data":"c9004d01c8dfb77b44e0398c1ed3282aa30efc25c1422a52b4492762d94f6d2a"}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.670552 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ht662\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.707355 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.713597 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" event={"ID":"9bc42122-0f65-4008-b335-5539ebd4ad62","Type":"ContainerStarted","Data":"c3c744cef75c850867a24f9000f572821ccb62db16f771d8547af2a01952f024"}
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.720189 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.723743 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-8svp5"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.817972 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.849352 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v42h6"]
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.873884 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.878886 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.897706 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ht662"
Dec 09 15:10:59 crc kubenswrapper[4716]: I1209 15:10:59.908840 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v42h6"]
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.030277 4716 patch_prober.go:28] interesting pod/apiserver-76f77b778f-z4jgs container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]log ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]etcd ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/start-apiserver-admission-initializer ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/generic-apiserver-start-informers ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/max-in-flight-filter ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/storage-object-count-tracker-hook ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Dec 09 15:11:00 crc kubenswrapper[4716]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/project.openshift.io-projectcache ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/openshift.io-startinformers ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/openshift.io-restmapperupdater ok
Dec 09 15:11:00 crc kubenswrapper[4716]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Dec 09 15:11:00 crc kubenswrapper[4716]: livez check failed
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.030942 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" podUID="a16e5eaf-14b0-47eb-8bc9-dfedd024c719" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.035921 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq4hz\" (UniqueName: \"kubernetes.io/projected/07e1c69c-8b33-4342-8632-010554dfd1d5-kube-api-access-cq4hz\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.035978 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-utilities\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.036021 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-catalog-content\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.102719 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.103669 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.109435 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.110660 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.114166 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.140804 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-utilities\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.140896 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-catalog-content\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.140992 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.141034 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.141074 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq4hz\" (UniqueName: \"kubernetes.io/projected/07e1c69c-8b33-4342-8632-010554dfd1d5-kube-api-access-cq4hz\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.142809 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-utilities\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6"
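The probe traffic running through this whole section (the router's 500s, the console and downloads connection refusals, and the apiserver livez output above with its [+]/[-] check list) reflects one rule: the prober treats a failed TCP connect or any status outside the 2xx/3xx range as a probe failure. A minimal Go sketch of that HTTP check, assuming nothing beyond the standard library; the URL is taken from the downloads pod entries above and is only an example target:

    // Minimal HTTP probe in the spirit of the kubelet prober: a failed
    // connect or a status outside 200-399 counts as failure, mirroring
    // the "HTTP probe failed with statuscode: 500" entries above.
    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func probeOnce(url string) error {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. "connect: connection refused" while the server is down
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    	}
    	return nil
    }

    func main() {
    	if err := probeOnce("http://10.217.0.29:8080/"); err != nil {
    		fmt.Println("Probe failed:", err)
    	}
    }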
\"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.169171 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq4hz\" (UniqueName: \"kubernetes.io/projected/07e1c69c-8b33-4342-8632-010554dfd1d5-kube-api-access-cq4hz\") pod \"redhat-operators-v42h6\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.200917 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5jwpg"] Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.202257 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.225417 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5jwpg"] Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.248502 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-utilities\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.248577 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpm7d\" (UniqueName: \"kubernetes.io/projected/85d4a173-8cf0-4f51-b713-6a8624461b61-kube-api-access-fpm7d\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.248736 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.248777 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.248802 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-catalog-content\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.248918 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.292330 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.301671 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dsxtv"] Dec 09 15:11:00 crc kubenswrapper[4716]: W1209 15:11:00.316308 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1fc78b58_085f_42f7_bec5_b28d0d2bc191.slice/crio-857846db4026a0de66208f6f8dbbc4a5061d74172731656e963801330af96ba3 WatchSource:0}: Error finding container 857846db4026a0de66208f6f8dbbc4a5061d74172731656e963801330af96ba3: Status 404 returned error can't find the container with id 857846db4026a0de66208f6f8dbbc4a5061d74172731656e963801330af96ba3 Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.350566 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-utilities\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.351697 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpm7d\" (UniqueName: \"kubernetes.io/projected/85d4a173-8cf0-4f51-b713-6a8624461b61-kube-api-access-fpm7d\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.351943 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-catalog-content\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.353234 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-utilities\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.353495 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-catalog-content\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.375058 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpm7d\" (UniqueName: \"kubernetes.io/projected/85d4a173-8cf0-4f51-b713-6a8624461b61-kube-api-access-fpm7d\") pod \"redhat-operators-5jwpg\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.376255 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.385197 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8bgn"] Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.404135 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.449469 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.607297 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.620902 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:00 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:00 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:00 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.620990 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.723459 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerStarted","Data":"e83b6d65ff49cab3387081debbc0a92ab7490a24706db0977eba245a286857bc"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.741332 4716 generic.go:334] "Generic (PLEG): container finished" podID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerID="613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f" exitCode=0 Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.741483 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerDied","Data":"613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.741552 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerStarted","Data":"857846db4026a0de66208f6f8dbbc4a5061d74172731656e963801330af96ba3"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.779036 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8f18e8d714d99a7e11ca4c2bfa18396603bae1d45c2f0c6979c6ece7a3f83ff0"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.818897 4716 generic.go:334] "Generic (PLEG): container finished" podID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerID="8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552" exitCode=0 Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.819054 4716 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerDied","Data":"8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.831606 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.842195 4716 generic.go:334] "Generic (PLEG): container finished" podID="02be1451-6780-479c-ab94-37503fea3645" containerID="22f283856a04138e7bc6da28c0e08c5315f3ff0a5122ff018763f120f7469851" exitCode=0 Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.842360 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerDied","Data":"22f283856a04138e7bc6da28c0e08c5315f3ff0a5122ff018763f120f7469851"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.842441 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerStarted","Data":"dfd9e4ea3313ea4f7ef2ea895327121a5ebc8113f1cf595354434be6c3b40514"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.875353 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9a9d73cb14bd53b67a035b18a4c09b1a5028a57f04f7c5b10dcce59129c94520"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.875417 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"3ce2e6549f0970c246fc19e6ef0627e033fa9e9165ea7d72f8d9fc060a468db2"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.906924 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" event={"ID":"9bc42122-0f65-4008-b335-5539ebd4ad62","Type":"ContainerStarted","Data":"57fd7c884e601be163257c7aec10e9c221e298d010ac8233aaf0547f907fa10c"} Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.937192 4716 generic.go:334] "Generic (PLEG): container finished" podID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerID="1fd185a0a7bdfe9dc50f5351db2a1503ab6d925647dc327bf1092ecda3f3cfdc" exitCode=0 Dec 09 15:11:00 crc kubenswrapper[4716]: I1209 15:11:00.938310 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerDied","Data":"1fd185a0a7bdfe9dc50f5351db2a1503ab6d925647dc327bf1092ecda3f3cfdc"} Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.012114 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ht662"] Dec 09 15:11:01 crc kubenswrapper[4716]: W1209 15:11:01.060773 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85d4a173_8cf0_4f51_b713_6a8624461b61.slice/crio-5d74736631f34e73e7e1b3643abf1a43c3aa9b29b96beb775678339392b3f07b WatchSource:0}: Error finding container 5d74736631f34e73e7e1b3643abf1a43c3aa9b29b96beb775678339392b3f07b: Status 404 returned error can't 
find the container with id 5d74736631f34e73e7e1b3643abf1a43c3aa9b29b96beb775678339392b3f07b Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.102284 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v42h6"] Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.109531 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5jwpg"] Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.110433 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-nlkfk" podStartSLOduration=15.110418797 podStartE2EDuration="15.110418797s" podCreationTimestamp="2025-12-09 15:10:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:11:01.079944033 +0000 UTC m=+148.234688021" watchObservedRunningTime="2025-12-09 15:11:01.110418797 +0000 UTC m=+148.265162775" Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.259416 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.269522 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.402535 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.617682 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:01 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:01 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:01 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.617793 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.984925 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ce60669e7e94aab5bca9cc971795df107ccd15df58ef1215495b64acc3198314"} Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.986166 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.989425 4716 generic.go:334] "Generic (PLEG): container finished" podID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerID="eff16e0447e281aecb333aea9a281ccac85c36d6dcbf44a3c358158488481a52" exitCode=0 Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.989865 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" 
event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerDied","Data":"eff16e0447e281aecb333aea9a281ccac85c36d6dcbf44a3c358158488481a52"} Dec 09 15:11:01 crc kubenswrapper[4716]: I1209 15:11:01.989959 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerStarted","Data":"8791dd70049dd7f1da800e92ca374499ec23b4082be581d0a558beb26c573f87"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.000864 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45","Type":"ContainerStarted","Data":"331ff1f1230461d32f9e118b15653ff91152518db364ba3d21c4749f52f1c02d"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.004459 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" event={"ID":"a9146318-442c-453e-977e-802cdaa5532a","Type":"ContainerStarted","Data":"a5887a2be70abfd66b398d4e97f01df280df498b37af2a3e030796b376e2a4fb"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.004524 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" event={"ID":"a9146318-442c-453e-977e-802cdaa5532a","Type":"ContainerStarted","Data":"e940a4a14b56488d6124ffcd63397aabbbbc12d673621815919d2bb173d46f94"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.005675 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.016169 4716 generic.go:334] "Generic (PLEG): container finished" podID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerID="a9f711a43e0f1ae37491b98af162015b72d0a8ae01146c139b6f769c62fcf9e3" exitCode=0 Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.017417 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerDied","Data":"a9f711a43e0f1ae37491b98af162015b72d0a8ae01146c139b6f769c62fcf9e3"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.017451 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerStarted","Data":"5d74736631f34e73e7e1b3643abf1a43c3aa9b29b96beb775678339392b3f07b"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.024294 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"a7de3f5479cccb467b173a85b42eb1f4714d3eb552b7a6900d932a310488195a"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.024385 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"0df634fc1a2e8e2a5fbca051dc017cfc89b94b4d065a2ce4eb6b59925b789b10"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.051992 4716 generic.go:334] "Generic (PLEG): container finished" podID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerID="638be08b34d6174c39335a4616f23edab30f637d608ad3ba0e9ccdaf302bf789" exitCode=0 Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 
15:11:02.054186 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerDied","Data":"638be08b34d6174c39335a4616f23edab30f637d608ad3ba0e9ccdaf302bf789"} Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.103730 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" podStartSLOduration=126.103702149 podStartE2EDuration="2m6.103702149s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:11:02.091569559 +0000 UTC m=+149.246313567" watchObservedRunningTime="2025-12-09 15:11:02.103702149 +0000 UTC m=+149.258446137" Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.612532 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:02 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:02 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:02 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:02 crc kubenswrapper[4716]: I1209 15:11:02.612613 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.067965 4716 generic.go:334] "Generic (PLEG): container finished" podID="8e0c79e5-04c3-4242-9472-0cb67c34b499" containerID="a4bd52af2627d0fdaa7912ece7f1ef16690a42bc345f1501ef0fece4bc24c8fe" exitCode=0 Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.068059 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" event={"ID":"8e0c79e5-04c3-4242-9472-0cb67c34b499","Type":"ContainerDied","Data":"a4bd52af2627d0fdaa7912ece7f1ef16690a42bc345f1501ef0fece4bc24c8fe"} Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.075755 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45","Type":"ContainerStarted","Data":"355f0af6092a8d5c4ddfc46c4012749ffa34e8f199f1ed1dd3c118d3e9cfb054"} Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.139124 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.139104133 podStartE2EDuration="3.139104133s" podCreationTimestamp="2025-12-09 15:11:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:11:03.130593522 +0000 UTC m=+150.285337510" watchObservedRunningTime="2025-12-09 15:11:03.139104133 +0000 UTC m=+150.293848131" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.448524 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.449571 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.453380 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.455172 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.455469 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.545683 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.545783 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.610854 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:03 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:03 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:03 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.610968 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.647644 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.647731 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.647853 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.677666 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:03 crc kubenswrapper[4716]: I1209 15:11:03.789200 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.106791 4716 generic.go:334] "Generic (PLEG): container finished" podID="f916e62e-4d9d-44f9-bb4e-4ef8519ccc45" containerID="355f0af6092a8d5c4ddfc46c4012749ffa34e8f199f1ed1dd3c118d3e9cfb054" exitCode=0 Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.107017 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45","Type":"ContainerDied","Data":"355f0af6092a8d5c4ddfc46c4012749ffa34e8f199f1ed1dd3c118d3e9cfb054"} Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.138945 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.154644 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-z4jgs" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.611997 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:04 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:04 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:04 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.612598 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.693952 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.718588 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.790038 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e0c79e5-04c3-4242-9472-0cb67c34b499-config-volume\") pod \"8e0c79e5-04c3-4242-9472-0cb67c34b499\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.790243 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e0c79e5-04c3-4242-9472-0cb67c34b499-secret-volume\") pod \"8e0c79e5-04c3-4242-9472-0cb67c34b499\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.790433 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrczx\" (UniqueName: \"kubernetes.io/projected/8e0c79e5-04c3-4242-9472-0cb67c34b499-kube-api-access-nrczx\") pod \"8e0c79e5-04c3-4242-9472-0cb67c34b499\" (UID: \"8e0c79e5-04c3-4242-9472-0cb67c34b499\") " Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.794795 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e0c79e5-04c3-4242-9472-0cb67c34b499-config-volume" (OuterVolumeSpecName: "config-volume") pod "8e0c79e5-04c3-4242-9472-0cb67c34b499" (UID: "8e0c79e5-04c3-4242-9472-0cb67c34b499"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.801099 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e0c79e5-04c3-4242-9472-0cb67c34b499-kube-api-access-nrczx" (OuterVolumeSpecName: "kube-api-access-nrczx") pod "8e0c79e5-04c3-4242-9472-0cb67c34b499" (UID: "8e0c79e5-04c3-4242-9472-0cb67c34b499"). InnerVolumeSpecName "kube-api-access-nrczx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.814298 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e0c79e5-04c3-4242-9472-0cb67c34b499-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8e0c79e5-04c3-4242-9472-0cb67c34b499" (UID: "8e0c79e5-04c3-4242-9472-0cb67c34b499"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.892339 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrczx\" (UniqueName: \"kubernetes.io/projected/8e0c79e5-04c3-4242-9472-0cb67c34b499-kube-api-access-nrczx\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.893285 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8e0c79e5-04c3-4242-9472-0cb67c34b499-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:04 crc kubenswrapper[4716]: I1209 15:11:04.893299 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8e0c79e5-04c3-4242-9472-0cb67c34b499-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.173359 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" event={"ID":"8e0c79e5-04c3-4242-9472-0cb67c34b499","Type":"ContainerDied","Data":"503b3dbc558c7ae803355afe7671420c10a756a326039691a5f9737206244735"} Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.173407 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="503b3dbc558c7ae803355afe7671420c10a756a326039691a5f9737206244735" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.173467 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.201965 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604","Type":"ContainerStarted","Data":"09729bfbe17c50103df967246758fb45ac64923b1eaf97efe8ee663cd82ffb4e"} Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.613935 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:05 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:05 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:05 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.614477 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.697989 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.810082 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kubelet-dir\") pod \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.810187 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kube-api-access\") pod \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\" (UID: \"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45\") " Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.811910 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f916e62e-4d9d-44f9-bb4e-4ef8519ccc45" (UID: "f916e62e-4d9d-44f9-bb4e-4ef8519ccc45"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.826467 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f916e62e-4d9d-44f9-bb4e-4ef8519ccc45" (UID: "f916e62e-4d9d-44f9-bb4e-4ef8519ccc45"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.828384 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-pvl7k" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.911797 4716 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:05 crc kubenswrapper[4716]: I1209 15:11:05.911847 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f916e62e-4d9d-44f9-bb4e-4ef8519ccc45-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.220238 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604","Type":"ContainerStarted","Data":"c9bb8aacaa877d9ce303b4d6dc412752f289590be7bc085523cb2f1cd245e635"} Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.228159 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"f916e62e-4d9d-44f9-bb4e-4ef8519ccc45","Type":"ContainerDied","Data":"331ff1f1230461d32f9e118b15653ff91152518db364ba3d21c4749f52f1c02d"} Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.228241 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="331ff1f1230461d32f9e118b15653ff91152518db364ba3d21c4749f52f1c02d" Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.228242 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.240600 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.240565616 podStartE2EDuration="3.240565616s" podCreationTimestamp="2025-12-09 15:11:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:11:06.238612651 +0000 UTC m=+153.393356649" watchObservedRunningTime="2025-12-09 15:11:06.240565616 +0000 UTC m=+153.395309614" Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.610654 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:06 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:06 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:06 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:06 crc kubenswrapper[4716]: I1209 15:11:06.610742 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:07 crc kubenswrapper[4716]: I1209 15:11:07.299095 4716 generic.go:334] "Generic (PLEG): container finished" podID="cdb061c1-7c3f-4ece-90a1-7dbea1c6a604" containerID="c9bb8aacaa877d9ce303b4d6dc412752f289590be7bc085523cb2f1cd245e635" exitCode=0 Dec 09 15:11:07 crc kubenswrapper[4716]: I1209 15:11:07.299534 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604","Type":"ContainerDied","Data":"c9bb8aacaa877d9ce303b4d6dc412752f289590be7bc085523cb2f1cd245e635"} Dec 09 15:11:07 crc kubenswrapper[4716]: I1209 15:11:07.611375 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:07 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:07 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:07 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:07 crc kubenswrapper[4716]: I1209 15:11:07.611498 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:08 crc kubenswrapper[4716]: I1209 15:11:08.610585 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:08 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:08 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:08 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:08 crc kubenswrapper[4716]: I1209 15:11:08.611069 4716 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:08 crc kubenswrapper[4716]: I1209 15:11:08.961269 4716 patch_prober.go:28] interesting pod/console-f9d7485db-ckjw6 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Dec 09 15:11:08 crc kubenswrapper[4716]: I1209 15:11:08.961432 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ckjw6" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Dec 09 15:11:09 crc kubenswrapper[4716]: I1209 15:11:09.256007 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:09 crc kubenswrapper[4716]: I1209 15:11:09.256105 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:09 crc kubenswrapper[4716]: I1209 15:11:09.257123 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:09 crc kubenswrapper[4716]: I1209 15:11:09.257224 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:09 crc kubenswrapper[4716]: I1209 15:11:09.609937 4716 patch_prober.go:28] interesting pod/router-default-5444994796-w9bdm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 09 15:11:09 crc kubenswrapper[4716]: [-]has-synced failed: reason withheld Dec 09 15:11:09 crc kubenswrapper[4716]: [+]process-running ok Dec 09 15:11:09 crc kubenswrapper[4716]: healthz check failed Dec 09 15:11:09 crc kubenswrapper[4716]: I1209 15:11:09.609998 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-w9bdm" podUID="e432b91b-278e-4a2f-81cc-4b7983a789da" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 09 15:11:10 crc kubenswrapper[4716]: I1209 15:11:10.624476 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:11:10 crc kubenswrapper[4716]: I1209 15:11:10.628531 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-w9bdm" Dec 09 15:11:17 crc kubenswrapper[4716]: I1209 
15:11:17.922301 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:11:17 crc kubenswrapper[4716]: I1209 15:11:17.923229 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:11:18 crc kubenswrapper[4716]: I1209 15:11:18.774744 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:11:18 crc kubenswrapper[4716]: I1209 15:11:18.822896 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f0e1f1bc-46bd-4293-a9b9-d57c1f83a613-metrics-certs\") pod \"network-metrics-daemon-gpl2n\" (UID: \"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613\") " pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:11:18 crc kubenswrapper[4716]: I1209 15:11:18.847867 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-gpl2n" Dec 09 15:11:18 crc kubenswrapper[4716]: I1209 15:11:18.966157 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:11:18 crc kubenswrapper[4716]: I1209 15:11:18.969971 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.256776 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.257277 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.258339 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.258470 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.258675 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-console/downloads-7954f5f757-8qjxz" Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.259799 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.259943 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.260290 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"2a714e28ae6ce133113cb2ff61af5fc8d1f870097a4f4e51fcfe162256cdea64"} pod="openshift-console/downloads-7954f5f757-8qjxz" containerMessage="Container download-server failed liveness probe, will be restarted" Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.260555 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" containerID="cri-o://2a714e28ae6ce133113cb2ff61af5fc8d1f870097a4f4e51fcfe162256cdea64" gracePeriod=2 Dec 09 15:11:19 crc kubenswrapper[4716]: I1209 15:11:19.904485 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.075491 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.196318 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kube-api-access\") pod \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.196885 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kubelet-dir\") pod \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\" (UID: \"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604\") " Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.197245 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "cdb061c1-7c3f-4ece-90a1-7dbea1c6a604" (UID: "cdb061c1-7c3f-4ece-90a1-7dbea1c6a604"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.197547 4716 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.207261 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "cdb061c1-7c3f-4ece-90a1-7dbea1c6a604" (UID: "cdb061c1-7c3f-4ece-90a1-7dbea1c6a604"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.299592 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cdb061c1-7c3f-4ece-90a1-7dbea1c6a604-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.683485 4716 generic.go:334] "Generic (PLEG): container finished" podID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerID="2a714e28ae6ce133113cb2ff61af5fc8d1f870097a4f4e51fcfe162256cdea64" exitCode=0 Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.683598 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8qjxz" event={"ID":"31afed1d-4e1c-491e-b54b-a5e7e24077f1","Type":"ContainerDied","Data":"2a714e28ae6ce133113cb2ff61af5fc8d1f870097a4f4e51fcfe162256cdea64"} Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.684752 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"cdb061c1-7c3f-4ece-90a1-7dbea1c6a604","Type":"ContainerDied","Data":"09729bfbe17c50103df967246758fb45ac64923b1eaf97efe8ee663cd82ffb4e"} Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.684791 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09729bfbe17c50103df967246758fb45ac64923b1eaf97efe8ee663cd82ffb4e" Dec 09 15:11:20 crc kubenswrapper[4716]: I1209 15:11:20.684875 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 09 15:11:29 crc kubenswrapper[4716]: I1209 15:11:29.258049 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:29 crc kubenswrapper[4716]: I1209 15:11:29.259056 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:30 crc kubenswrapper[4716]: I1209 15:11:30.747118 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qgzhh" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.431606 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 09 15:11:36 crc kubenswrapper[4716]: E1209 15:11:36.432578 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f916e62e-4d9d-44f9-bb4e-4ef8519ccc45" containerName="pruner" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.432591 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f916e62e-4d9d-44f9-bb4e-4ef8519ccc45" containerName="pruner" Dec 09 15:11:36 crc kubenswrapper[4716]: E1209 15:11:36.432612 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdb061c1-7c3f-4ece-90a1-7dbea1c6a604" containerName="pruner" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.432643 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdb061c1-7c3f-4ece-90a1-7dbea1c6a604" containerName="pruner" Dec 09 15:11:36 crc kubenswrapper[4716]: E1209 15:11:36.432657 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e0c79e5-04c3-4242-9472-0cb67c34b499" containerName="collect-profiles" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.432664 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e0c79e5-04c3-4242-9472-0cb67c34b499" containerName="collect-profiles" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.432799 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdb061c1-7c3f-4ece-90a1-7dbea1c6a604" containerName="pruner" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.432816 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f916e62e-4d9d-44f9-bb4e-4ef8519ccc45" containerName="pruner" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.432827 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e0c79e5-04c3-4242-9472-0cb67c34b499" containerName="collect-profiles" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.433475 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.435108 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.438721 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.439005 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.590148 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/33f4004a-7650-4877-8b5a-081996c83e9a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.590273 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/33f4004a-7650-4877-8b5a-081996c83e9a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.691837 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/33f4004a-7650-4877-8b5a-081996c83e9a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.691969 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/33f4004a-7650-4877-8b5a-081996c83e9a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.692091 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/33f4004a-7650-4877-8b5a-081996c83e9a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.714105 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/33f4004a-7650-4877-8b5a-081996c83e9a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:36 crc kubenswrapper[4716]: I1209 15:11:36.767139 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:39 crc kubenswrapper[4716]: I1209 15:11:39.256797 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:39 crc kubenswrapper[4716]: I1209 15:11:39.257341 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:39 crc kubenswrapper[4716]: I1209 15:11:39.347092 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.024690 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.025887 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.046607 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.141248 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kubelet-dir\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.141299 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-var-lock\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.141424 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kube-api-access\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.242927 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kube-api-access\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.242990 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kubelet-dir\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.243007 4716 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-var-lock\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.243104 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-var-lock\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.243147 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kubelet-dir\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.264946 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kube-api-access\") pod \"installer-9-crc\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: I1209 15:11:42.354236 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:11:42 crc kubenswrapper[4716]: E1209 15:11:42.571909 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 09 15:11:42 crc kubenswrapper[4716]: E1209 15:11:42.572112 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cq4hz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
redhat-operators-v42h6_openshift-marketplace(07e1c69c-8b33-4342-8632-010554dfd1d5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:42 crc kubenswrapper[4716]: E1209 15:11:42.573293 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-v42h6" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" Dec 09 15:11:42 crc kubenswrapper[4716]: E1209 15:11:42.585117 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 09 15:11:42 crc kubenswrapper[4716]: E1209 15:11:42.585304 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fpm7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5jwpg_openshift-marketplace(85d4a173-8cf0-4f51-b713-6a8624461b61): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:42 crc kubenswrapper[4716]: E1209 15:11:42.586555 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5jwpg" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.858771 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/redhat-operators-5jwpg" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.858910 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-v42h6" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.927156 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.927883 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mdlpn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-4zqlr_openshift-marketplace(3f60d900-cfcc-4840-8f6b-cdcb043a510b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.929131 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-4zqlr" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.947460 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.947786 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hg5hh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-f8rnc_openshift-marketplace(6e8a9e79-4b63-4c3f-968c-9524e682af80): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:43 crc kubenswrapper[4716]: E1209 15:11:43.949253 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-f8rnc" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" Dec 09 15:11:44 crc kubenswrapper[4716]: I1209 15:11:44.467579 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-gpl2n"] Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.416242 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-f8rnc" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.416941 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-4zqlr" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.488546 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.488779 4716 kuberuntime_manager.go:1274] 
"Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ms8q9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-dsxtv_openshift-marketplace(1fc78b58-085f-42f7-bec5-b28d0d2bc191): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.492273 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-dsxtv" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.500615 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.500801 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kdx22,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-n8bgn_openshift-marketplace(e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:45 crc kubenswrapper[4716]: E1209 15:11:45.501969 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-n8bgn" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.243709 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-dsxtv" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.243853 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-n8bgn" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" Dec 09 15:11:47 crc kubenswrapper[4716]: W1209 15:11:47.248471 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0e1f1bc_46bd_4293_a9b9_d57c1f83a613.slice/crio-f4819bb0f26f8932a79b60666fb4f6b8ec898719a473869b4504f0bd6661f372 WatchSource:0}: Error finding container f4819bb0f26f8932a79b60666fb4f6b8ec898719a473869b4504f0bd6661f372: Status 404 returned error can't find the container with id f4819bb0f26f8932a79b60666fb4f6b8ec898719a473869b4504f0bd6661f372 Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.322323 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.322603 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hpcnm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-bfn6b_openshift-marketplace(1cf42d24-d670-433b-8a60-11e6cadde0dd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.331327 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-bfn6b" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.338193 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.338404 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wxb4v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-6grj2_openshift-marketplace(02be1451-6780-479c-ab94-37503fea3645): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 09 15:11:47 crc kubenswrapper[4716]: E1209 15:11:47.339987 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-6grj2" podUID="02be1451-6780-479c-ab94-37503fea3645" Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.689784 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 09 15:11:47 crc kubenswrapper[4716]: W1209 15:11:47.699661 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod91c555de_cffa_4f7b_90a0_df2c5d3b0855.slice/crio-e058fc40d0d259bad99a51b2f36a278b4cb2c2d13e1fc7b4bc7c4eecc98d66fa WatchSource:0}: Error finding container e058fc40d0d259bad99a51b2f36a278b4cb2c2d13e1fc7b4bc7c4eecc98d66fa: Status 404 returned error can't find the container with id e058fc40d0d259bad99a51b2f36a278b4cb2c2d13e1fc7b4bc7c4eecc98d66fa Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.756839 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.922160 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.922236 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.991183 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"33f4004a-7650-4877-8b5a-081996c83e9a","Type":"ContainerStarted","Data":"0827bb087102ee37e4910d8d7e04365848a6afdf4db415530464377e2ae15347"} Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.992501 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"91c555de-cffa-4f7b-90a0-df2c5d3b0855","Type":"ContainerStarted","Data":"e058fc40d0d259bad99a51b2f36a278b4cb2c2d13e1fc7b4bc7c4eecc98d66fa"} Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.995642 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8qjxz" event={"ID":"31afed1d-4e1c-491e-b54b-a5e7e24077f1","Type":"ContainerStarted","Data":"8e673760d4665d302b2985761936a7fee9eed8e734ad72867adc4d25c80d9449"} Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.997114 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-8qjxz" Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.997195 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:47 crc kubenswrapper[4716]: I1209 15:11:47.997227 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:48 crc kubenswrapper[4716]: I1209 15:11:48.001325 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" event={"ID":"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613","Type":"ContainerStarted","Data":"dac0c75b101f79cd2f5a6cb6df2720c8edc0949e0042fb9649b43f4d07e10099"} Dec 09 15:11:48 crc kubenswrapper[4716]: I1209 15:11:48.001361 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" event={"ID":"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613","Type":"ContainerStarted","Data":"a0ef8dc9fb99443ec503d94a17121117b089cced2be1f02be9f0b217153d0c11"} Dec 09 15:11:48 crc kubenswrapper[4716]: I1209 15:11:48.001374 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-gpl2n" event={"ID":"f0e1f1bc-46bd-4293-a9b9-d57c1f83a613","Type":"ContainerStarted","Data":"f4819bb0f26f8932a79b60666fb4f6b8ec898719a473869b4504f0bd6661f372"} Dec 09 15:11:48 crc kubenswrapper[4716]: E1209 15:11:48.002330 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-bfn6b" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" Dec 09 15:11:48 crc kubenswrapper[4716]: E1209 15:11:48.002976 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/certified-operators-6grj2" podUID="02be1451-6780-479c-ab94-37503fea3645" Dec 09 15:11:48 crc kubenswrapper[4716]: I1209 15:11:48.076089 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-gpl2n" podStartSLOduration=172.076065311 podStartE2EDuration="2m52.076065311s" podCreationTimestamp="2025-12-09 15:08:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:11:48.075570777 +0000 UTC m=+195.230314765" watchObservedRunningTime="2025-12-09 15:11:48.076065311 +0000 UTC m=+195.230809299" Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.009678 4716 generic.go:334] "Generic (PLEG): container finished" podID="33f4004a-7650-4877-8b5a-081996c83e9a" containerID="2492ec4b2e90b358fe6aa0ec952b0327481fd509452c7e1d34cbaf698de977fd" exitCode=0 Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.009746 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"33f4004a-7650-4877-8b5a-081996c83e9a","Type":"ContainerDied","Data":"2492ec4b2e90b358fe6aa0ec952b0327481fd509452c7e1d34cbaf698de977fd"} Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.012433 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"91c555de-cffa-4f7b-90a0-df2c5d3b0855","Type":"ContainerStarted","Data":"5b8f04bac7148a79c71e1eca6f1ecb83f30eb55c9e202cff32329d60534bd05c"} Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.013132 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.013230 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.255571 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.255652 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.257530 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:49 crc kubenswrapper[4716]: I1209 15:11:49.257576 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" 
probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.032612 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" start-of-body= Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.032693 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": dial tcp 10.217.0.29:8080: connect: connection refused" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.289321 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.316465 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=8.316444994 podStartE2EDuration="8.316444994s" podCreationTimestamp="2025-12-09 15:11:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:11:49.057944945 +0000 UTC m=+196.212688943" watchObservedRunningTime="2025-12-09 15:11:50.316444994 +0000 UTC m=+197.471188992" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.347962 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/33f4004a-7650-4877-8b5a-081996c83e9a-kube-api-access\") pod \"33f4004a-7650-4877-8b5a-081996c83e9a\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.348020 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/33f4004a-7650-4877-8b5a-081996c83e9a-kubelet-dir\") pod \"33f4004a-7650-4877-8b5a-081996c83e9a\" (UID: \"33f4004a-7650-4877-8b5a-081996c83e9a\") " Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.348251 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/33f4004a-7650-4877-8b5a-081996c83e9a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "33f4004a-7650-4877-8b5a-081996c83e9a" (UID: "33f4004a-7650-4877-8b5a-081996c83e9a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.356951 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33f4004a-7650-4877-8b5a-081996c83e9a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "33f4004a-7650-4877-8b5a-081996c83e9a" (UID: "33f4004a-7650-4877-8b5a-081996c83e9a"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.451068 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/33f4004a-7650-4877-8b5a-081996c83e9a-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:50 crc kubenswrapper[4716]: I1209 15:11:50.451107 4716 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/33f4004a-7650-4877-8b5a-081996c83e9a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:11:51 crc kubenswrapper[4716]: I1209 15:11:51.040561 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"33f4004a-7650-4877-8b5a-081996c83e9a","Type":"ContainerDied","Data":"0827bb087102ee37e4910d8d7e04365848a6afdf4db415530464377e2ae15347"} Dec 09 15:11:51 crc kubenswrapper[4716]: I1209 15:11:51.041531 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0827bb087102ee37e4910d8d7e04365848a6afdf4db415530464377e2ae15347" Dec 09 15:11:51 crc kubenswrapper[4716]: I1209 15:11:51.040637 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 09 15:11:59 crc kubenswrapper[4716]: I1209 15:11:59.273496 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-8qjxz" Dec 09 15:11:59 crc kubenswrapper[4716]: I1209 15:11:59.857313 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bj2zv"] Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.178467 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerStarted","Data":"2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.182138 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerStarted","Data":"9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.184076 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerStarted","Data":"ab5651c5b058a34b3e3beb2670b610913033b142f2b0f0de6e9428d2206bdd0c"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.186258 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerStarted","Data":"c83dc178d19f9be51a0ccd17e650554adbcc5b4dc2734efcc78c92b4436a87f2"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.193948 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerStarted","Data":"7ccb29d9eaaff5cc12554a78c6f582140b6cd27eb15a06f2e24300f273d9ed0e"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.233172 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" 
event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerStarted","Data":"f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.236474 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerStarted","Data":"089e2e968ce107555275c5f7739c492b0a981e8a30576f2f0eb1dccf29e2e157"} Dec 09 15:12:12 crc kubenswrapper[4716]: I1209 15:12:12.239110 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerStarted","Data":"a6f9fe25ab68fdc4dc52d6f71595b74db2fa6580c2868f988598e5d6f5ea3563"} Dec 09 15:12:15 crc kubenswrapper[4716]: I1209 15:12:15.313808 4716 generic.go:334] "Generic (PLEG): container finished" podID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerID="a6f9fe25ab68fdc4dc52d6f71595b74db2fa6580c2868f988598e5d6f5ea3563" exitCode=0 Dec 09 15:12:15 crc kubenswrapper[4716]: I1209 15:12:15.313899 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerDied","Data":"a6f9fe25ab68fdc4dc52d6f71595b74db2fa6580c2868f988598e5d6f5ea3563"} Dec 09 15:12:15 crc kubenswrapper[4716]: I1209 15:12:15.321421 4716 generic.go:334] "Generic (PLEG): container finished" podID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerID="2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d" exitCode=0 Dec 09 15:12:15 crc kubenswrapper[4716]: I1209 15:12:15.321474 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerDied","Data":"2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d"} Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.423134 4716 generic.go:334] "Generic (PLEG): container finished" podID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerID="9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf" exitCode=0 Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.423223 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerDied","Data":"9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf"} Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.432352 4716 generic.go:334] "Generic (PLEG): container finished" podID="02be1451-6780-479c-ab94-37503fea3645" containerID="c83dc178d19f9be51a0ccd17e650554adbcc5b4dc2734efcc78c92b4436a87f2" exitCode=0 Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.432443 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerDied","Data":"c83dc178d19f9be51a0ccd17e650554adbcc5b4dc2734efcc78c92b4436a87f2"} Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.446732 4716 generic.go:334] "Generic (PLEG): container finished" podID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerID="7ccb29d9eaaff5cc12554a78c6f582140b6cd27eb15a06f2e24300f273d9ed0e" exitCode=0 Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.446805 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" 
event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerDied","Data":"7ccb29d9eaaff5cc12554a78c6f582140b6cd27eb15a06f2e24300f273d9ed0e"} Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.450095 4716 generic.go:334] "Generic (PLEG): container finished" podID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerID="f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759" exitCode=0 Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.450127 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerDied","Data":"f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759"} Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.922015 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.922102 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.922175 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.922958 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:12:17 crc kubenswrapper[4716]: I1209 15:12:17.923029 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce" gracePeriod=600 Dec 09 15:12:18 crc kubenswrapper[4716]: I1209 15:12:18.463017 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerStarted","Data":"63be79a46108bbcee26991617a2f26a64e81eaa15ba40da0e4b588940190ff4f"} Dec 09 15:12:18 crc kubenswrapper[4716]: I1209 15:12:18.465635 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerStarted","Data":"f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714"} Dec 09 15:12:18 crc kubenswrapper[4716]: I1209 15:12:18.510556 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n8bgn" podStartSLOduration=6.129889604 podStartE2EDuration="1m20.510520445s" podCreationTimestamp="2025-12-09 15:10:58 +0000 UTC" firstStartedPulling="2025-12-09 15:11:02.061162718 +0000 UTC m=+149.215906716" 
lastFinishedPulling="2025-12-09 15:12:16.441793569 +0000 UTC m=+223.596537557" observedRunningTime="2025-12-09 15:12:18.490575008 +0000 UTC m=+225.645319006" watchObservedRunningTime="2025-12-09 15:12:18.510520445 +0000 UTC m=+225.665264433" Dec 09 15:12:18 crc kubenswrapper[4716]: I1209 15:12:18.514729 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dsxtv" podStartSLOduration=4.955285232 podStartE2EDuration="1m20.514700774s" podCreationTimestamp="2025-12-09 15:10:58 +0000 UTC" firstStartedPulling="2025-12-09 15:11:00.749161678 +0000 UTC m=+147.903905676" lastFinishedPulling="2025-12-09 15:12:16.30857723 +0000 UTC m=+223.463321218" observedRunningTime="2025-12-09 15:12:18.508813397 +0000 UTC m=+225.663557395" watchObservedRunningTime="2025-12-09 15:12:18.514700774 +0000 UTC m=+225.669444762" Dec 09 15:12:19 crc kubenswrapper[4716]: I1209 15:12:19.194211 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n8bgn" Dec 09 15:12:19 crc kubenswrapper[4716]: I1209 15:12:19.194673 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n8bgn" Dec 09 15:12:19 crc kubenswrapper[4716]: I1209 15:12:19.443656 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dsxtv" Dec 09 15:12:19 crc kubenswrapper[4716]: I1209 15:12:19.443734 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dsxtv" Dec 09 15:12:19 crc kubenswrapper[4716]: I1209 15:12:19.475702 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce" exitCode=0 Dec 09 15:12:19 crc kubenswrapper[4716]: I1209 15:12:19.475780 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce"} Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.520132 4716 generic.go:334] "Generic (PLEG): container finished" podID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerID="ab5651c5b058a34b3e3beb2670b610913033b142f2b0f0de6e9428d2206bdd0c" exitCode=0 Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.520653 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerDied","Data":"ab5651c5b058a34b3e3beb2670b610913033b142f2b0f0de6e9428d2206bdd0c"} Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.530202 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"4c6e76c9ca1315384ba9656c51a130473b399015c21d538dc1786fee74edb164"} Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.550748 4716 generic.go:334] "Generic (PLEG): container finished" podID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerID="089e2e968ce107555275c5f7739c492b0a981e8a30576f2f0eb1dccf29e2e157" exitCode=0 Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.550869 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerDied","Data":"089e2e968ce107555275c5f7739c492b0a981e8a30576f2f0eb1dccf29e2e157"} Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.936346 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-n8bgn" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="registry-server" probeResult="failure" output=< Dec 09 15:12:20 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:12:20 crc kubenswrapper[4716]: > Dec 09 15:12:20 crc kubenswrapper[4716]: I1209 15:12:20.938107 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-dsxtv" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="registry-server" probeResult="failure" output=< Dec 09 15:12:20 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:12:20 crc kubenswrapper[4716]: > Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.558595 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerStarted","Data":"50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea"} Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.560416 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerStarted","Data":"86ec669b334f1f8428099c358a5874f0c67dd1eaec4ceb5b5bd3b8f03df3cd7c"} Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.562676 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerStarted","Data":"6bd0c4b854e56de90699d8d0d76ab06929112419f2d0565228fc83584b40774f"} Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.564829 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerStarted","Data":"9745c35448b658c3e07a00924fdb91a8cec39e5bef15b139aafc23ba470eba09"} Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.566892 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerStarted","Data":"7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073"} Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.570171 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerStarted","Data":"2ce17d4f6f5bb72b5cc3c0e8133d8f4194518093419d32bc323a823c5874abbc"} Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.583953 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f8rnc" podStartSLOduration=4.708462009 podStartE2EDuration="1m25.583934649s" podCreationTimestamp="2025-12-09 15:10:56 +0000 UTC" firstStartedPulling="2025-12-09 15:10:59.720231668 +0000 UTC m=+146.874975656" lastFinishedPulling="2025-12-09 15:12:20.595704308 +0000 UTC m=+227.750448296" observedRunningTime="2025-12-09 15:12:21.581124269 +0000 UTC m=+228.735868257" 
Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.614483 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4zqlr" podStartSLOduration=3.8412947170000002 podStartE2EDuration="1m24.614463197s" podCreationTimestamp="2025-12-09 15:10:57 +0000 UTC" firstStartedPulling="2025-12-09 15:10:59.719866048 +0000 UTC m=+146.874610036" lastFinishedPulling="2025-12-09 15:12:20.493034528 +0000 UTC m=+227.647778516" observedRunningTime="2025-12-09 15:12:21.609032423 +0000 UTC m=+228.763776411" watchObservedRunningTime="2025-12-09 15:12:21.614463197 +0000 UTC m=+228.769207185"
Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.635374 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bfn6b" podStartSLOduration=4.952074671 podStartE2EDuration="1m24.635347191s" podCreationTimestamp="2025-12-09 15:10:57 +0000 UTC" firstStartedPulling="2025-12-09 15:11:01.034272734 +0000 UTC m=+148.189016732" lastFinishedPulling="2025-12-09 15:12:20.717545274 +0000 UTC m=+227.872289252" observedRunningTime="2025-12-09 15:12:21.630946756 +0000 UTC m=+228.785690744" watchObservedRunningTime="2025-12-09 15:12:21.635347191 +0000 UTC m=+228.790091179"
Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.661026 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5jwpg" podStartSLOduration=2.69234675 podStartE2EDuration="1m21.661002861s" podCreationTimestamp="2025-12-09 15:11:00 +0000 UTC" firstStartedPulling="2025-12-09 15:11:02.019553352 +0000 UTC m=+149.174297340" lastFinishedPulling="2025-12-09 15:12:20.988209463 +0000 UTC m=+228.142953451" observedRunningTime="2025-12-09 15:12:21.656223475 +0000 UTC m=+228.810967463" watchObservedRunningTime="2025-12-09 15:12:21.661002861 +0000 UTC m=+228.815746849"
Dec 09 15:12:21 crc kubenswrapper[4716]: I1209 15:12:21.676051 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6grj2" podStartSLOduration=6.158372591 podStartE2EDuration="1m25.676030559s" podCreationTimestamp="2025-12-09 15:10:56 +0000 UTC" firstStartedPulling="2025-12-09 15:11:00.848317996 +0000 UTC m=+148.003061994" lastFinishedPulling="2025-12-09 15:12:20.365975974 +0000 UTC m=+227.520719962" observedRunningTime="2025-12-09 15:12:21.674910837 +0000 UTC m=+228.829654825" watchObservedRunningTime="2025-12-09 15:12:21.676030559 +0000 UTC m=+228.830774547"
Dec 09 15:12:24 crc kubenswrapper[4716]: I1209 15:12:24.892767 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" containerName="oauth-openshift" containerID="cri-o://c379e1443debaff54159b74ac5292f7f66c9435c831ab16f7e14d768136df020" gracePeriod=15
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.599929 4716 generic.go:334] "Generic (PLEG): container finished" podID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" containerID="c379e1443debaff54159b74ac5292f7f66c9435c831ab16f7e14d768136df020" exitCode=0
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.600416 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" event={"ID":"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4","Type":"ContainerDied","Data":"c379e1443debaff54159b74ac5292f7f66c9435c831ab16f7e14d768136df020"}
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.725717 4716 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.726121 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33f4004a-7650-4877-8b5a-081996c83e9a" containerName="pruner"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.726139 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="33f4004a-7650-4877-8b5a-081996c83e9a" containerName="pruner"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.726295 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="33f4004a-7650-4877-8b5a-081996c83e9a" containerName="pruner"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.726733 4716 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.727049 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4" gracePeriod=15
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.727250 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.727798 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e" gracePeriod=15
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.727860 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575" gracePeriod=15
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.727938 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db" gracePeriod=15
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.728048 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23" gracePeriod=15
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.728746 4716 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729035 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729059 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729076 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729087 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729103 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729112 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729124 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729132 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729149 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729158 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729167 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729175 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729314 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729327 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729337 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729348 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729363 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Dec 09 15:12:25 crc kubenswrapper[4716]: E1209 15:12:25.729501 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729511 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.729691 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834740 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834796 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834843 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834860 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834902 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834919 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834936 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.834956 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.919949 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.921864 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.922204 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936387 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936452 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936476 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936504 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936533 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936706 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936762 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936747 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936814 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936829 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936851 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936841 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936951 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.938903 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.936974 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 09 15:12:25 crc kubenswrapper[4716]: I1209 15:12:25.939054 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039490 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-idp-0-file-data\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039559 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snswq\" (UniqueName: \"kubernetes.io/projected/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-kube-api-access-snswq\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039586 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-provider-selection\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039637 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-cliconfig\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039676 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-ocp-branding-template\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039706 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-policies\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039733 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-dir\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039762 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-service-ca\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039800 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-trusted-ca-bundle\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") "
\"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039830 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-error\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039861 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-serving-cert\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039893 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-login\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039926 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-session\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039955 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-router-certs\") pod \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\" (UID: \"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4\") " Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.039945 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.040172 4716 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.040943 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.041100 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). 
InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.043997 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.044038 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.049757 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-kube-api-access-snswq" (OuterVolumeSpecName: "kube-api-access-snswq") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "kube-api-access-snswq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.049914 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.050370 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.050516 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.055099 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.055656 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.056389 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.056966 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.061640 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" (UID: "867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141316 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141373 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141385 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141396 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141407 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141417 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141428 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snswq\" (UniqueName: \"kubernetes.io/projected/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-kube-api-access-snswq\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141442 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141762 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141778 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141790 4716 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141799 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.141812 4716 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.608772 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.609692 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.610494 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db" exitCode=2 Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.612006 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" event={"ID":"867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4","Type":"ContainerDied","Data":"0c25f27379bb242954552c4b3178d9bfec53e85ba511c7880f315c33900982f1"} Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.612044 4716 scope.go:117] "RemoveContainer" containerID="c379e1443debaff54159b74ac5292f7f66c9435c831ab16f7e14d768136df020" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.612146 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.613390 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.613754 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.629709 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:26 crc kubenswrapper[4716]: I1209 15:12:26.630220 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.619825 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.621106 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.621798 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575" exitCode=0 Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.797122 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.797225 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.797273 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:12:27 crc kubenswrapper[4716]: I1209 15:12:27.797292 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.019988 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.021007 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" 
pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.021197 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.025084 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.025446 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.025650 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.025918 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.201335 4716 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.202637 4716 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.203231 4716 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.203617 4716 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.204118 4716 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: 
I1209 15:12:28.204155 4716 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.204426 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="200ms" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.406129 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="400ms" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.478215 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.478532 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.478772 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.478800 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.520098 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.520657 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.521060 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.521662 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.521890 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.527526 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.527922 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.528232 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.528594 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.529209 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.529424 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.633468 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.635388 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.636410 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e" exitCode=0 Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.636468 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23" exitCode=0 Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.636479 4716 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4" exitCode=0 Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.636551 4716 scope.go:117] "RemoveContainer" containerID="ef45f12f75c79ff9ac8b86a7e2680c245796dfe0f0a2c50bf87bd6289ed98fd4" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.639412 4716 
generic.go:334] "Generic (PLEG): container finished" podID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" containerID="5b8f04bac7148a79c71e1eca6f1ecb83f30eb55c9e202cff32329d60534bd05c" exitCode=0 Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.639662 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"91c555de-cffa-4f7b-90a0-df2c5d3b0855","Type":"ContainerDied","Data":"5b8f04bac7148a79c71e1eca6f1ecb83f30eb55c9e202cff32329d60534bd05c"} Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.641101 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.641447 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.641796 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.642153 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.642474 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.642741 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.693884 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.694600 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 
15:12:28.694794 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.694972 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.695222 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.695420 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.695572 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.695866 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.696420 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.696854 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.697113 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.697552 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc 
kubenswrapper[4716]: I1209 15:12:28.697789 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.697953 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.698103 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.707773 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.708532 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.708940 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.709230 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.709504 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.709761 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.709934 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.717038 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.717894 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.718588 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.718898 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.719128 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.719347 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.719577 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.719879 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.720307 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.806686 4716 controller.go:145] "Failed to ensure lease exists, will retry" 
err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="800ms" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878311 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878420 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878535 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878570 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878649 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878747 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878968 4716 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.878987 4716 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:28 crc kubenswrapper[4716]: I1209 15:12:28.879001 4716 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:28 crc kubenswrapper[4716]: E1209 15:12:28.998974 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:12:28Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:12:28Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:12:28Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-09T15:12:28Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: E1209 15:12:29.001173 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: E1209 15:12:29.001531 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: E1209 15:12:29.001899 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: E1209 15:12:29.002320 4716 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: E1209 15:12:29.002425 4716 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.168314 4716 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n8bgn" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.168850 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.169135 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.169567 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.170168 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.170496 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.170872 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.171123 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.171277 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.211906 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n8bgn" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.212509 4716 status_manager.go:851] "Failed to get status 
for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.212815 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.213055 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.213273 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.213488 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.213712 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.214015 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.214231 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.221144 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.486274 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dsxtv" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.489351 4716 
status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.489742 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.490108 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.490275 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.490423 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.490566 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.490724 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.490862 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.528379 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dsxtv" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.528958 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.529196 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.529532 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.530075 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.530511 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.530768 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.531054 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.531380 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: E1209 15:12:29.607593 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="1.6s" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.647080 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 09 15:12:29 crc kubenswrapper[4716]: 
I1209 15:12:29.647826 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.647931 4716 scope.go:117] "RemoveContainer" containerID="ec5ebf6e6b3e738a6820e18d64f061d4b5d189ca8679ff4022d8fb067152356e" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.651874 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.652192 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.652413 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.652651 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.652943 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.653255 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.653685 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.653919 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: 
I1209 15:12:29.654134 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.660744 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.661785 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.662453 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.663006 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.663349 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.663849 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.664223 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.665038 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 
15:12:29.665475 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.673685 4716 scope.go:117] "RemoveContainer" containerID="de714fdad8286e21cc8d8ae205a4f6865cd6d327506b01afef5b7b20e6126575" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.687646 4716 scope.go:117] "RemoveContainer" containerID="c3d97194633f1887068404036685bbf70a27eb6bc84c9bb87b244fa0bec73e23" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.705968 4716 scope.go:117] "RemoveContainer" containerID="e47627c99c7b7a15eaa4d3b56a99ffd3269597092f56f1fa9616aee5ca9b28db" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.720926 4716 scope.go:117] "RemoveContainer" containerID="a50bbf3259e992348606e70b847812ffcdf20ec0ce302ba9055d76df86a328c4" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.742368 4716 scope.go:117] "RemoveContainer" containerID="966307613c1e898081ccc1ba642c119b702a8262747d43cf00ac036d007b408b" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.917190 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.917893 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.918312 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.919020 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.919378 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.919764 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.920051 4716 status_manager.go:851] "Failed to get status for pod" 
podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.920319 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.920654 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:29 crc kubenswrapper[4716]: I1209 15:12:29.921052 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.102707 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kubelet-dir\") pod \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.103402 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kube-api-access\") pod \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.103443 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-var-lock\") pod \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\" (UID: \"91c555de-cffa-4f7b-90a0-df2c5d3b0855\") " Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.102892 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "91c555de-cffa-4f7b-90a0-df2c5d3b0855" (UID: "91c555de-cffa-4f7b-90a0-df2c5d3b0855"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.103650 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-var-lock" (OuterVolumeSpecName: "var-lock") pod "91c555de-cffa-4f7b-90a0-df2c5d3b0855" (UID: "91c555de-cffa-4f7b-90a0-df2c5d3b0855"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.103780 4716 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.103797 4716 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/91c555de-cffa-4f7b-90a0-df2c5d3b0855-var-lock\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.113338 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "91c555de-cffa-4f7b-90a0-df2c5d3b0855" (UID: "91c555de-cffa-4f7b-90a0-df2c5d3b0855"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.205241 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/91c555de-cffa-4f7b-90a0-df2c5d3b0855-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 09 15:12:30 crc kubenswrapper[4716]: E1209 15:12:30.307166 4716 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.66:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" volumeName="registry-storage" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.377275 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.377355 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.405153 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.405359 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.429114 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.429771 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.430262 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: 
connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.430534 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.430766 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.431023 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.431295 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.431504 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.431732 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.431972 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.432193 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.451220 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.451924 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" 
pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.452426 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.453152 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.453524 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.453981 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.454356 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.454801 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.455128 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.455457 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.455859 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" 
pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.456294 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.656344 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.656338 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"91c555de-cffa-4f7b-90a0-df2c5d3b0855","Type":"ContainerDied","Data":"e058fc40d0d259bad99a51b2f36a278b4cb2c2d13e1fc7b4bc7c4eecc98d66fa"} Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.656417 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e058fc40d0d259bad99a51b2f36a278b4cb2c2d13e1fc7b4bc7c4eecc98d66fa" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.670154 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.670339 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.670540 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.670728 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.670920 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.671132 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" 
pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.671291 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.671446 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.671642 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.671805 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.671972 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.703892 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.704155 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.704401 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.704608 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.704840 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" 
pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.705084 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.705323 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.705497 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.705666 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.705990 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.706399 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.706681 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.706950 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.707326 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" 
pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.707582 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.707845 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.708117 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.708361 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.708616 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.708906 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.709143 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.709458 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.709712 4716 status_manager.go:851] "Failed to get status for pod" 
podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.710012 4716 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:30 crc kubenswrapper[4716]: E1209 15:12:30.777754 4716 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.66:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 15:12:30 crc kubenswrapper[4716]: I1209 15:12:30.778372 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 15:12:30 crc kubenswrapper[4716]: W1209 15:12:30.810194 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-1fbb05e2ca33714c517fc49ebe45eaadfd7f48e17c33e7e659053695e4342d97 WatchSource:0}: Error finding container 1fbb05e2ca33714c517fc49ebe45eaadfd7f48e17c33e7e659053695e4342d97: Status 404 returned error can't find the container with id 1fbb05e2ca33714c517fc49ebe45eaadfd7f48e17c33e7e659053695e4342d97 Dec 09 15:12:30 crc kubenswrapper[4716]: E1209 15:12:30.813908 4716 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.66:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187f94c48aa9925e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 15:12:30.813393502 +0000 UTC m=+237.968137490,LastTimestamp:2025-12-09 15:12:30.813393502 +0000 UTC m=+237.968137490,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 09 15:12:31 crc kubenswrapper[4716]: E1209 15:12:31.208887 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="3.2s" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.664935 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d22ce3df36cb7eb1fed6424becbf0321a0f5b3a793aeb67f134d5361c53a4229"} Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.665009 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"1fbb05e2ca33714c517fc49ebe45eaadfd7f48e17c33e7e659053695e4342d97"} Dec 09 15:12:31 crc kubenswrapper[4716]: E1209 15:12:31.666121 4716 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.66:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.667036 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.667382 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.667669 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.667881 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.668051 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.668227 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.668387 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 
38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.668537 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.668742 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:31 crc kubenswrapper[4716]: I1209 15:12:31.668932 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:32 crc kubenswrapper[4716]: E1209 15:12:32.910750 4716 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.66:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187f94c48aa9925e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-09 15:12:30.813393502 +0000 UTC m=+237.968137490,LastTimestamp:2025-12-09 15:12:30.813393502 +0000 UTC m=+237.968137490,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.216714 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.217241 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.217892 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.219107 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.219732 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.220243 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.220510 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.220782 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.221307 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:33 crc kubenswrapper[4716]: I1209 15:12:33.221932 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:34 crc kubenswrapper[4716]: E1209 15:12:34.409835 4716 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.66:6443: connect: connection refused" interval="6.4s" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.213254 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.214899 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.215343 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.216663 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.217242 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.217577 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.218091 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.218337 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.220181 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.220468 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.220688 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.232211 4716 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.232363 4716 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:37 crc kubenswrapper[4716]: E1209 15:12:37.233495 4716 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.234293 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.702046 4716 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="e9c69a4128608d2ba7ad6db3ae26faa259c61adcc72a06963789cc71fe6f45b3" exitCode=0 Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.702125 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"e9c69a4128608d2ba7ad6db3ae26faa259c61adcc72a06963789cc71fe6f45b3"} Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.702182 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fdc04361c0aaf5fccd2e2fc179f9ac0d0d4bc58f0a84c1910ed299fa6fa42416"} Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.702605 4716 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.702661 4716 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:37 crc kubenswrapper[4716]: E1209 15:12:37.703266 4716 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.703317 4716 status_manager.go:851] "Failed to get status for pod" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" pod="openshift-marketplace/redhat-marketplace-n8bgn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-n8bgn\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 
15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.703811 4716 status_manager.go:851] "Failed to get status for pod" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" pod="openshift-marketplace/redhat-marketplace-dsxtv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dsxtv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.704497 4716 status_manager.go:851] "Failed to get status for pod" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" pod="openshift-authentication/oauth-openshift-558db77b4-bj2zv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bj2zv\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.704891 4716 status_manager.go:851] "Failed to get status for pod" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" pod="openshift-marketplace/community-operators-4zqlr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4zqlr\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.705202 4716 status_manager.go:851] "Failed to get status for pod" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" pod="openshift-marketplace/certified-operators-bfn6b" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-bfn6b\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.705492 4716 status_manager.go:851] "Failed to get status for pod" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" pod="openshift-marketplace/redhat-operators-5jwpg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-5jwpg\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.705705 4716 status_manager.go:851] "Failed to get status for pod" podUID="02be1451-6780-479c-ab94-37503fea3645" pod="openshift-marketplace/certified-operators-6grj2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-6grj2\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.705874 4716 status_manager.go:851] "Failed to get status for pod" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.706146 4716 status_manager.go:851] "Failed to get status for pod" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" pod="openshift-marketplace/community-operators-f8rnc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-f8rnc\": dial tcp 38.102.83.66:6443: connect: connection refused" Dec 09 15:12:37 crc kubenswrapper[4716]: I1209 15:12:37.706600 4716 status_manager.go:851] "Failed to get status for pod" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" pod="openshift-marketplace/redhat-operators-v42h6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-v42h6\": dial tcp 38.102.83.66:6443: connect: connection 
refused" Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.716473 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c6655fe3361511f7add31c6221ffe139a33a908df21f8ab6faa6bf77c5e90741"} Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.717294 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f5724c95bf8b32ae14da455a88fc35ef04c59b65a573d45a233c59481d4e4ec4"} Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.717313 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"01ef974b78df9beb70d301cd6d20af6525be1e29e207254c46065f88ab985a8c"} Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.717327 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f463289f9e15774bcb6a55e13ef29f8073a2349bde8ae7c64351e32e242ba17c"} Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.723157 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.723225 4716 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302" exitCode=1 Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.723270 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302"} Dec 09 15:12:38 crc kubenswrapper[4716]: I1209 15:12:38.723937 4716 scope.go:117] "RemoveContainer" containerID="f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302" Dec 09 15:12:39 crc kubenswrapper[4716]: I1209 15:12:39.734280 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 09 15:12:39 crc kubenswrapper[4716]: I1209 15:12:39.734841 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"412e6919879491cd00942b03fcf1b61d868838e467d9ce88ebf02cf2775b1bee"} Dec 09 15:12:39 crc kubenswrapper[4716]: I1209 15:12:39.738638 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d3196513ec1fceb5c5f7fd26aa3bbca318d9727737d2f405675420eba09cb3f2"} Dec 09 15:12:39 crc kubenswrapper[4716]: I1209 15:12:39.739140 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:39 crc kubenswrapper[4716]: I1209 15:12:39.739268 4716 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:39 crc kubenswrapper[4716]: I1209 15:12:39.739297 4716 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:40 crc kubenswrapper[4716]: I1209 15:12:40.332765 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:12:42 crc kubenswrapper[4716]: I1209 15:12:42.235481 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:42 crc kubenswrapper[4716]: I1209 15:12:42.235552 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:42 crc kubenswrapper[4716]: I1209 15:12:42.241831 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:44 crc kubenswrapper[4716]: I1209 15:12:44.071268 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:12:44 crc kubenswrapper[4716]: I1209 15:12:44.071438 4716 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 09 15:12:44 crc kubenswrapper[4716]: I1209 15:12:44.073181 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 09 15:12:44 crc kubenswrapper[4716]: I1209 15:12:44.749739 4716 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:45 crc kubenswrapper[4716]: I1209 15:12:45.770265 4716 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:45 crc kubenswrapper[4716]: I1209 15:12:45.774793 4716 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:45 crc kubenswrapper[4716]: I1209 15:12:45.774312 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:12:45 crc kubenswrapper[4716]: I1209 15:12:45.778421 4716 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="2258896f-d00d-4a4d-b7bc-a59693dfbd46" Dec 09 15:12:46 crc kubenswrapper[4716]: I1209 15:12:46.777401 4716 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:46 crc kubenswrapper[4716]: I1209 15:12:46.779513 4716 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e7ed35d3-6b08-4863-a641-e63313d65d8a" Dec 09 15:12:46 crc kubenswrapper[4716]: I1209 15:12:46.781215 4716 
status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="2258896f-d00d-4a4d-b7bc-a59693dfbd46" Dec 09 15:12:53 crc kubenswrapper[4716]: I1209 15:12:53.072301 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 09 15:12:53 crc kubenswrapper[4716]: I1209 15:12:53.083668 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 09 15:12:53 crc kubenswrapper[4716]: I1209 15:12:53.202055 4716 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 09 15:12:53 crc kubenswrapper[4716]: I1209 15:12:53.751553 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 09 15:12:53 crc kubenswrapper[4716]: I1209 15:12:53.960088 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 09 15:12:54 crc kubenswrapper[4716]: I1209 15:12:54.071256 4716 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 09 15:12:54 crc kubenswrapper[4716]: I1209 15:12:54.071341 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 09 15:12:54 crc kubenswrapper[4716]: I1209 15:12:54.174956 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 09 15:12:54 crc kubenswrapper[4716]: I1209 15:12:54.606254 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 09 15:12:54 crc kubenswrapper[4716]: I1209 15:12:54.900741 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 09 15:12:55 crc kubenswrapper[4716]: I1209 15:12:55.319335 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 09 15:12:55 crc kubenswrapper[4716]: I1209 15:12:55.756847 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 09 15:12:56 crc kubenswrapper[4716]: I1209 15:12:56.054742 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 09 15:12:56 crc kubenswrapper[4716]: I1209 15:12:56.833945 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.075097 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.204447 4716 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.292853 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.417021 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.522383 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.662036 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 09 15:12:57 crc kubenswrapper[4716]: I1209 15:12:57.747211 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 09 15:12:58 crc kubenswrapper[4716]: I1209 15:12:58.007495 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 09 15:12:58 crc kubenswrapper[4716]: I1209 15:12:58.060160 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 09 15:12:58 crc kubenswrapper[4716]: I1209 15:12:58.105936 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 09 15:12:58 crc kubenswrapper[4716]: I1209 15:12:58.396652 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 09 15:12:58 crc kubenswrapper[4716]: I1209 15:12:58.433498 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 09 15:12:58 crc kubenswrapper[4716]: I1209 15:12:58.753468 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.002300 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.061838 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.085740 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.401954 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.430962 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.454376 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.652636 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 
Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.787123 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.792186 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Dec 09 15:12:59 crc kubenswrapper[4716]: I1209 15:12:59.826909 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.123411 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.158447 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.264266 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.416713 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.483521 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.500835 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.535724 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.621372 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.648857 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.712363 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.728195 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.759884 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.815557 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 09 15:13:00 crc kubenswrapper[4716]: I1209 15:13:00.953850 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.062821 4716 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.105680 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.153735 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.393762 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.615485 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.781901 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.837211 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.969648 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.970291 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.970533 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.970994 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Dec 09 15:13:01 crc kubenswrapper[4716]: I1209 15:13:01.983672 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.002648 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.021384 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.178768 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.358773 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.388337 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.390836 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.436810 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.516839 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.523938 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.534891 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.591155 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.616518 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.702932 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.703453 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.732549 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.733816 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Dec 09 15:13:02 crc kubenswrapper[4716]: I1209 15:13:02.896607 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.009077 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.084119 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.107791 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.116033 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.144114 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.189011 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.209384 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.220347 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.372865 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.392892 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.393291 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.479418 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.522263 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.565370 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.627986 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.708813 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.797743 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.835244 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.877698 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.942183 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Dec 09 15:13:03 crc kubenswrapper[4716]: I1209 15:13:03.969885 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.002375 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.026640 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.069728 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.071132 4716 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.071190 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.071285 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.072421 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"412e6919879491cd00942b03fcf1b61d868838e467d9ce88ebf02cf2775b1bee"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.072614 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://412e6919879491cd00942b03fcf1b61d868838e467d9ce88ebf02cf2775b1bee" gracePeriod=30
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.076310 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.088087 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.172076 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.175981 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.212760 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.220927 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.256229 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.264362 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.319258 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.412066 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.415230 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.534470 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.573719 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.615712 4716 reflector.go:368] Caches populated for *v1.ConfigMap from
object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.657353 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.676665 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.759032 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.776820 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.915669 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.931155 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 09 15:13:04 crc kubenswrapper[4716]: I1209 15:13:04.980801 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.076230 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.103764 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.103813 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.105107 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.171768 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.360274 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.419182 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.454282 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.459917 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.506390 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.560340 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.569849 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 09 
15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.600033 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.659378 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.754168 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.762330 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.782044 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.803938 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.818310 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 09 15:13:05 crc kubenswrapper[4716]: I1209 15:13:05.866393 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.015937 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.026138 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.040956 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.217264 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.283071 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.303350 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.322836 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.325793 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.475086 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.483128 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.613564 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" 
Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.622064 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.632048 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.634190 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.721446 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.738187 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.785762 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.826826 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.838182 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.894380 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.899452 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.925287 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.937747 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 09 15:13:06 crc kubenswrapper[4716]: I1209 15:13:06.990640 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.009595 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.032226 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.076760 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.190616 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.239301 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.247431 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 
15:13:07.264365 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.329736 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.348854 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.384802 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.549727 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.613116 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.692102 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.707821 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.807467 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.807557 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.817705 4716 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.965978 4716 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.966376 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v42h6" podStartSLOduration=49.957009909 podStartE2EDuration="2m8.966349499s" podCreationTimestamp="2025-12-09 15:10:59 +0000 UTC" firstStartedPulling="2025-12-09 15:11:01.999939043 +0000 UTC m=+149.154683031" lastFinishedPulling="2025-12-09 15:12:21.009278633 +0000 UTC m=+228.164022621" observedRunningTime="2025-12-09 15:12:21.701311988 +0000 UTC m=+228.856055996" watchObservedRunningTime="2025-12-09 15:13:07.966349499 +0000 UTC m=+275.121093487" Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.973486 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-bj2zv"] Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.973583 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 09 15:13:07 crc kubenswrapper[4716]: I1209 15:13:07.978812 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.002069 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.002039045 podStartE2EDuration="24.002039045s" podCreationTimestamp="2025-12-09 15:12:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:07.996013175 +0000 UTC m=+275.150757163" watchObservedRunningTime="2025-12-09 15:13:08.002039045 +0000 UTC m=+275.156783033" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.015938 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.086503 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.096104 4716 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.163671 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.257482 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.284332 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.298229 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.402760 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.417782 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.466458 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.586905 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.591044 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.595905 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.683245 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.792070 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.922659 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 09 15:13:08 crc kubenswrapper[4716]: I1209 15:13:08.940760 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" 
Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.080042 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.144706 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.220762 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" path="/var/lib/kubelet/pods/867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4/volumes" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.493691 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.562280 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.594918 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.634135 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.665573 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.706740 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.723948 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.740976 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.819549 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 09 15:13:09 crc kubenswrapper[4716]: I1209 15:13:09.846687 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.000757 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.069173 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.174166 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.282964 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.425551 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.450049 4716 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.526390 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 09 15:13:10 crc kubenswrapper[4716]: I1209 15:13:10.881377 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 09 15:13:11 crc kubenswrapper[4716]: I1209 15:13:11.392298 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 09 15:13:11 crc kubenswrapper[4716]: I1209 15:13:11.660702 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 09 15:13:11 crc kubenswrapper[4716]: I1209 15:13:11.768783 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 09 15:13:11 crc kubenswrapper[4716]: I1209 15:13:11.879398 4716 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.042108 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.299755 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.451610 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.581398 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.618676 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-54b5c98c4-rk2ms"] Dec 09 15:13:12 crc kubenswrapper[4716]: E1209 15:13:12.618983 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" containerName="installer" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.619000 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" containerName="installer" Dec 09 15:13:12 crc kubenswrapper[4716]: E1209 15:13:12.619013 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" containerName="oauth-openshift" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.619020 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" containerName="oauth-openshift" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.619115 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="867fb86f-4ac3-4c86-b2e3-8dad4c34d3b4" containerName="oauth-openshift" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.619135 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c555de-cffa-4f7b-90a0-df2c5d3b0855" containerName="installer" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.619798 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.623954 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.623969 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.624116 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.625322 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.625544 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.626132 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.627257 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.627391 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.627272 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.632485 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.632788 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.632946 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.638860 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.642440 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54b5c98c4-rk2ms"] Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.645751 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.650400 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.724948 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.734807 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.734870 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.734905 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.734943 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzr4s\" (UniqueName: \"kubernetes.io/projected/ae964b69-3ea8-4b53-85c5-72f13effbfb2-kube-api-access-rzr4s\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735007 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-error\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735034 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-login\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735078 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735115 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735142 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735175 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735205 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-audit-policies\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735235 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ae964b69-3ea8-4b53-85c5-72f13effbfb2-audit-dir\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735260 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.735289 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-session\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.836732 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.836813 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc 
kubenswrapper[4716]: I1209 15:13:12.836851 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837207 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837252 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-audit-policies\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837279 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ae964b69-3ea8-4b53-85c5-72f13effbfb2-audit-dir\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837299 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837321 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-session\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837351 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837381 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837403 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837434 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzr4s\" (UniqueName: \"kubernetes.io/projected/ae964b69-3ea8-4b53-85c5-72f13effbfb2-kube-api-access-rzr4s\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837460 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-error\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837483 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-login\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.837433 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ae964b69-3ea8-4b53-85c5-72f13effbfb2-audit-dir\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.838247 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-audit-policies\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.839122 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.839179 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.839181 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.844330 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-session\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.844440 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.844510 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.844529 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.844654 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.846051 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.846469 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-login\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.853968 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/ae964b69-3ea8-4b53-85c5-72f13effbfb2-v4-0-config-user-template-error\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.856663 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzr4s\" (UniqueName: \"kubernetes.io/projected/ae964b69-3ea8-4b53-85c5-72f13effbfb2-kube-api-access-rzr4s\") pod \"oauth-openshift-54b5c98c4-rk2ms\" (UID: \"ae964b69-3ea8-4b53-85c5-72f13effbfb2\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.925232 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 09 15:13:12 crc kubenswrapper[4716]: I1209 15:13:12.940572 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:13 crc kubenswrapper[4716]: I1209 15:13:13.145134 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54b5c98c4-rk2ms"] Dec 09 15:13:13 crc kubenswrapper[4716]: I1209 15:13:13.202036 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 09 15:13:13 crc kubenswrapper[4716]: I1209 15:13:13.349646 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 09 15:13:13 crc kubenswrapper[4716]: I1209 15:13:13.713647 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 09 15:13:13 crc kubenswrapper[4716]: I1209 15:13:13.917849 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 09 15:13:14 crc kubenswrapper[4716]: I1209 15:13:14.041443 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" event={"ID":"ae964b69-3ea8-4b53-85c5-72f13effbfb2","Type":"ContainerStarted","Data":"aa423830d51898d0c36f69ff1640f97c22baacf30b24c8241e0b453410b11776"} Dec 09 15:13:14 crc kubenswrapper[4716]: I1209 15:13:14.041490 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" event={"ID":"ae964b69-3ea8-4b53-85c5-72f13effbfb2","Type":"ContainerStarted","Data":"219da1bbbd31b055bf9fd1b0cbccb3ef01a88c090d0989657409dc75e5524325"} Dec 09 15:13:14 crc kubenswrapper[4716]: I1209 15:13:14.041931 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:14 crc kubenswrapper[4716]: I1209 15:13:14.046556 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" Dec 09 15:13:14 crc kubenswrapper[4716]: I1209 15:13:14.065948 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-54b5c98c4-rk2ms" podStartSLOduration=75.065924251 podStartE2EDuration="1m15.065924251s" podCreationTimestamp="2025-12-09 15:11:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:14.063508059 +0000 UTC 
m=+281.218252047" watchObservedRunningTime="2025-12-09 15:13:14.065924251 +0000 UTC m=+281.220668229" Dec 09 15:13:18 crc kubenswrapper[4716]: I1209 15:13:18.558176 4716 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 09 15:13:18 crc kubenswrapper[4716]: I1209 15:13:18.559323 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://d22ce3df36cb7eb1fed6424becbf0321a0f5b3a793aeb67f134d5361c53a4229" gracePeriod=5 Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.110069 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.110412 4716 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="d22ce3df36cb7eb1fed6424becbf0321a0f5b3a793aeb67f134d5361c53a4229" exitCode=137 Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.110451 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fbb05e2ca33714c517fc49ebe45eaadfd7f48e17c33e7e659053695e4342d97" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.129172 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.129246 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199580 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199661 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199721 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199741 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199793 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199835 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199859 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.199949 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.200042 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.200255 4716 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.200267 4716 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.200277 4716 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.200286 4716 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.207518 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:13:24 crc kubenswrapper[4716]: I1209 15:13:24.301499 4716 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:25 crc kubenswrapper[4716]: I1209 15:13:25.115381 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 09 15:13:25 crc kubenswrapper[4716]: I1209 15:13:25.222103 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 09 15:13:29 crc kubenswrapper[4716]: I1209 15:13:29.138728 4716 generic.go:334] "Generic (PLEG): container finished" podID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerID="977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f" exitCode=0 Dec 09 15:13:29 crc kubenswrapper[4716]: I1209 15:13:29.138823 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" event={"ID":"a25d26a9-7c6b-455e-9b8d-cc2fba08c576","Type":"ContainerDied","Data":"977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f"} Dec 09 15:13:29 crc kubenswrapper[4716]: I1209 15:13:29.139763 4716 scope.go:117] "RemoveContainer" containerID="977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f" Dec 09 15:13:30 crc kubenswrapper[4716]: I1209 15:13:30.146885 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" event={"ID":"a25d26a9-7c6b-455e-9b8d-cc2fba08c576","Type":"ContainerStarted","Data":"f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf"} Dec 09 15:13:30 crc kubenswrapper[4716]: I1209 15:13:30.147651 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:13:30 crc kubenswrapper[4716]: I1209 15:13:30.150290 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:13:31 crc kubenswrapper[4716]: I1209 15:13:31.821697 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"] Dec 09 15:13:31 crc kubenswrapper[4716]: I1209 15:13:31.827408 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bs6mn"] Dec 09 15:13:31 crc kubenswrapper[4716]: I1209 15:13:31.827786 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" podUID="4acd966d-4bae-456f-bffd-9ad6533cc66d" containerName="controller-manager" containerID="cri-o://6071d60b07474389c76e522b45fbce6a4ae60444689d677ed85606ec7aeb2c9b" gracePeriod=30 Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.163585 4716 generic.go:334] "Generic (PLEG): container finished" podID="4acd966d-4bae-456f-bffd-9ad6533cc66d" containerID="6071d60b07474389c76e522b45fbce6a4ae60444689d677ed85606ec7aeb2c9b" exitCode=0 Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.163683 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" 
event={"ID":"4acd966d-4bae-456f-bffd-9ad6533cc66d","Type":"ContainerDied","Data":"6071d60b07474389c76e522b45fbce6a4ae60444689d677ed85606ec7aeb2c9b"} Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.164056 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" podUID="d90937ae-2446-41f4-94eb-e928c5d449de" containerName="route-controller-manager" containerID="cri-o://537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d" gracePeriod=30 Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.309306 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.413391 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-proxy-ca-bundles\") pod \"4acd966d-4bae-456f-bffd-9ad6533cc66d\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.413539 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-client-ca\") pod \"4acd966d-4bae-456f-bffd-9ad6533cc66d\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.413639 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zt54k\" (UniqueName: \"kubernetes.io/projected/4acd966d-4bae-456f-bffd-9ad6533cc66d-kube-api-access-zt54k\") pod \"4acd966d-4bae-456f-bffd-9ad6533cc66d\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.413677 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-config\") pod \"4acd966d-4bae-456f-bffd-9ad6533cc66d\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.413713 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4acd966d-4bae-456f-bffd-9ad6533cc66d-serving-cert\") pod \"4acd966d-4bae-456f-bffd-9ad6533cc66d\" (UID: \"4acd966d-4bae-456f-bffd-9ad6533cc66d\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.415462 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-config" (OuterVolumeSpecName: "config") pod "4acd966d-4bae-456f-bffd-9ad6533cc66d" (UID: "4acd966d-4bae-456f-bffd-9ad6533cc66d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.415600 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4acd966d-4bae-456f-bffd-9ad6533cc66d" (UID: "4acd966d-4bae-456f-bffd-9ad6533cc66d"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.415604 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-client-ca" (OuterVolumeSpecName: "client-ca") pod "4acd966d-4bae-456f-bffd-9ad6533cc66d" (UID: "4acd966d-4bae-456f-bffd-9ad6533cc66d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.421861 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4acd966d-4bae-456f-bffd-9ad6533cc66d-kube-api-access-zt54k" (OuterVolumeSpecName: "kube-api-access-zt54k") pod "4acd966d-4bae-456f-bffd-9ad6533cc66d" (UID: "4acd966d-4bae-456f-bffd-9ad6533cc66d"). InnerVolumeSpecName "kube-api-access-zt54k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.440271 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4acd966d-4bae-456f-bffd-9ad6533cc66d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4acd966d-4bae-456f-bffd-9ad6533cc66d" (UID: "4acd966d-4bae-456f-bffd-9ad6533cc66d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.511574 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.515203 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.515242 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.515254 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zt54k\" (UniqueName: \"kubernetes.io/projected/4acd966d-4bae-456f-bffd-9ad6533cc66d-kube-api-access-zt54k\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.515268 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4acd966d-4bae-456f-bffd-9ad6533cc66d-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.515277 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4acd966d-4bae-456f-bffd-9ad6533cc66d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.615989 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90937ae-2446-41f4-94eb-e928c5d449de-serving-cert\") pod \"d90937ae-2446-41f4-94eb-e928c5d449de\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.616100 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vw2dm\" (UniqueName: \"kubernetes.io/projected/d90937ae-2446-41f4-94eb-e928c5d449de-kube-api-access-vw2dm\") pod 
\"d90937ae-2446-41f4-94eb-e928c5d449de\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.616144 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-client-ca\") pod \"d90937ae-2446-41f4-94eb-e928c5d449de\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.616330 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-config\") pod \"d90937ae-2446-41f4-94eb-e928c5d449de\" (UID: \"d90937ae-2446-41f4-94eb-e928c5d449de\") " Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.617240 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-client-ca" (OuterVolumeSpecName: "client-ca") pod "d90937ae-2446-41f4-94eb-e928c5d449de" (UID: "d90937ae-2446-41f4-94eb-e928c5d449de"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.618098 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-config" (OuterVolumeSpecName: "config") pod "d90937ae-2446-41f4-94eb-e928c5d449de" (UID: "d90937ae-2446-41f4-94eb-e928c5d449de"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.619527 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d90937ae-2446-41f4-94eb-e928c5d449de-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d90937ae-2446-41f4-94eb-e928c5d449de" (UID: "d90937ae-2446-41f4-94eb-e928c5d449de"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.620150 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d90937ae-2446-41f4-94eb-e928c5d449de-kube-api-access-vw2dm" (OuterVolumeSpecName: "kube-api-access-vw2dm") pod "d90937ae-2446-41f4-94eb-e928c5d449de" (UID: "d90937ae-2446-41f4-94eb-e928c5d449de"). InnerVolumeSpecName "kube-api-access-vw2dm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.718560 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.718598 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d90937ae-2446-41f4-94eb-e928c5d449de-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.718614 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vw2dm\" (UniqueName: \"kubernetes.io/projected/d90937ae-2446-41f4-94eb-e928c5d449de-kube-api-access-vw2dm\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:32 crc kubenswrapper[4716]: I1209 15:13:32.718676 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d90937ae-2446-41f4-94eb-e928c5d449de-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.041545 4716 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.060821 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-hvc56"] Dec 09 15:13:33 crc kubenswrapper[4716]: E1209 15:13:33.061144 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d90937ae-2446-41f4-94eb-e928c5d449de" containerName="route-controller-manager" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.061167 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d90937ae-2446-41f4-94eb-e928c5d449de" containerName="route-controller-manager" Dec 09 15:13:33 crc kubenswrapper[4716]: E1209 15:13:33.061182 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.061190 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 09 15:13:33 crc kubenswrapper[4716]: E1209 15:13:33.061200 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4acd966d-4bae-456f-bffd-9ad6533cc66d" containerName="controller-manager" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.061210 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4acd966d-4bae-456f-bffd-9ad6533cc66d" containerName="controller-manager" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.061373 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.061385 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4acd966d-4bae-456f-bffd-9ad6533cc66d" containerName="controller-manager" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.061397 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d90937ae-2446-41f4-94eb-e928c5d449de" containerName="route-controller-manager" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.062023 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.071963 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-hvc56"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.076667 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.077829 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129281 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2b420902-4d80-4431-9459-99b901cc2a03-serving-cert\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129329 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-client-ca\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129379 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n7bm\" (UniqueName: \"kubernetes.io/projected/5c5b6865-9b80-4832-8375-6c5edfe18311-kube-api-access-2n7bm\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129402 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c5b6865-9b80-4832-8375-6c5edfe18311-serving-cert\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129427 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-client-ca\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129496 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-config\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129593 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-proxy-ca-bundles\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129652 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh2v4\" (UniqueName: \"kubernetes.io/projected/2b420902-4d80-4431-9459-99b901cc2a03-kube-api-access-vh2v4\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.129681 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-config\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.140141 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.171561 4716 generic.go:334] "Generic (PLEG): container finished" podID="d90937ae-2446-41f4-94eb-e928c5d449de" containerID="537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d" exitCode=0 Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.171640 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" event={"ID":"d90937ae-2446-41f4-94eb-e928c5d449de","Type":"ContainerDied","Data":"537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d"} Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.171686 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.172068 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66" event={"ID":"d90937ae-2446-41f4-94eb-e928c5d449de","Type":"ContainerDied","Data":"c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790"} Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.172105 4716 scope.go:117] "RemoveContainer" containerID="537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.174915 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" event={"ID":"4acd966d-4bae-456f-bffd-9ad6533cc66d","Type":"ContainerDied","Data":"3945069fb90ed8aeb1f7386a28d8bbf335cca6cdaf0d28d8f1a3a8715a038581"} Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.175044 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-bs6mn" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.207708 4716 scope.go:117] "RemoveContainer" containerID="537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d" Dec 09 15:13:33 crc kubenswrapper[4716]: E1209 15:13:33.208167 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d\": container with ID starting with 537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d not found: ID does not exist" containerID="537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.208214 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d"} err="failed to get container status \"537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d\": rpc error: code = NotFound desc = could not find container \"537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d\": container with ID starting with 537d2f96c04f48ad73087db180f4f639e8485bba528eb54d2df37f724faaa90d not found: ID does not exist" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.208254 4716 scope.go:117] "RemoveContainer" containerID="6071d60b07474389c76e522b45fbce6a4ae60444689d677ed85606ec7aeb2c9b" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.228726 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231760 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n7bm\" (UniqueName: \"kubernetes.io/projected/5c5b6865-9b80-4832-8375-6c5edfe18311-kube-api-access-2n7bm\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231817 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c5b6865-9b80-4832-8375-6c5edfe18311-serving-cert\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231862 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-client-ca\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231885 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-config\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231917 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-proxy-ca-bundles\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231941 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh2v4\" (UniqueName: \"kubernetes.io/projected/2b420902-4d80-4431-9459-99b901cc2a03-kube-api-access-vh2v4\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.231969 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-config\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.232028 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2b420902-4d80-4431-9459-99b901cc2a03-serving-cert\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.232062 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-client-ca\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.233031 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-client-ca\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.233136 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-client-ca\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.234932 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-config\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.235708 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-proxy-ca-bundles\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: 
\"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.236853 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pfn66"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.238881 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2b420902-4d80-4431-9459-99b901cc2a03-serving-cert\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.239338 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-config\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.244649 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c5b6865-9b80-4832-8375-6c5edfe18311-serving-cert\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.244752 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bs6mn"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.249646 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-bs6mn"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.259236 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh2v4\" (UniqueName: \"kubernetes.io/projected/2b420902-4d80-4431-9459-99b901cc2a03-kube-api-access-vh2v4\") pod \"route-controller-manager-64f96cc955-p6qqp\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.260088 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n7bm\" (UniqueName: \"kubernetes.io/projected/5c5b6865-9b80-4832-8375-6c5edfe18311-kube-api-access-2n7bm\") pod \"controller-manager-6bb9db44d5-hvc56\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.378990 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.394080 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.435160 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-hvc56"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.459226 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp"] Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.700455 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-hvc56"] Dec 09 15:13:33 crc kubenswrapper[4716]: W1209 15:13:33.706273 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c5b6865_9b80_4832_8375_6c5edfe18311.slice/crio-282e65f48eedfbc6ac194b6b70461aa82ffc74abb903ab1343a0ee6b28654b22 WatchSource:0}: Error finding container 282e65f48eedfbc6ac194b6b70461aa82ffc74abb903ab1343a0ee6b28654b22: Status 404 returned error can't find the container with id 282e65f48eedfbc6ac194b6b70461aa82ffc74abb903ab1343a0ee6b28654b22 Dec 09 15:13:33 crc kubenswrapper[4716]: I1209 15:13:33.769412 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp"] Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.196889 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" event={"ID":"5c5b6865-9b80-4832-8375-6c5edfe18311","Type":"ContainerStarted","Data":"fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a"} Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.197442 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.197463 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" event={"ID":"5c5b6865-9b80-4832-8375-6c5edfe18311","Type":"ContainerStarted","Data":"282e65f48eedfbc6ac194b6b70461aa82ffc74abb903ab1343a0ee6b28654b22"} Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.197056 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" podUID="5c5b6865-9b80-4832-8375-6c5edfe18311" containerName="controller-manager" containerID="cri-o://fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a" gracePeriod=30 Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.205347 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.214094 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" event={"ID":"2b420902-4d80-4431-9459-99b901cc2a03","Type":"ContainerStarted","Data":"3479a6981ebe4361a997a234e2c9dcbd8a14ecbc83c41dd9e2a07c68d93e710d"} Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.214145 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" 
event={"ID":"2b420902-4d80-4431-9459-99b901cc2a03","Type":"ContainerStarted","Data":"2edc60e45ff2334073d0b224eb91e24d033f92663d22f7a83e8b7a75061cb75b"} Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.214320 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" podUID="2b420902-4d80-4431-9459-99b901cc2a03" containerName="route-controller-manager" containerID="cri-o://3479a6981ebe4361a997a234e2c9dcbd8a14ecbc83c41dd9e2a07c68d93e710d" gracePeriod=30 Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.214527 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.217492 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" podStartSLOduration=1.217469921 podStartE2EDuration="1.217469921s" podCreationTimestamp="2025-12-09 15:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:34.214484682 +0000 UTC m=+301.369228670" watchObservedRunningTime="2025-12-09 15:13:34.217469921 +0000 UTC m=+301.372213909" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.232517 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.234119 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.234164 4716 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="412e6919879491cd00942b03fcf1b61d868838e467d9ce88ebf02cf2775b1bee" exitCode=137 Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.234232 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"412e6919879491cd00942b03fcf1b61d868838e467d9ce88ebf02cf2775b1bee"} Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.234277 4716 scope.go:117] "RemoveContainer" containerID="f979732f06b975529b4ddf1d2f6997e1af490ba637142d1100d85912c84c9302" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.245249 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" podStartSLOduration=1.2452139 podStartE2EDuration="1.2452139s" podCreationTimestamp="2025-12-09 15:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:34.242117207 +0000 UTC m=+301.396861205" watchObservedRunningTime="2025-12-09 15:13:34.2452139 +0000 UTC m=+301.399957888" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.399817 4716 patch_prober.go:28] interesting pod/route-controller-manager-64f96cc955-p6qqp container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": read 
tcp 10.217.0.2:50676->10.217.0.58:8443: read: connection reset by peer" start-of-body= Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.400227 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" podUID="2b420902-4d80-4431-9459-99b901cc2a03" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": read tcp 10.217.0.2:50676->10.217.0.58:8443: read: connection reset by peer" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.640381 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.714727 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-76df65d987-xclgl"] Dec 09 15:13:34 crc kubenswrapper[4716]: E1209 15:13:34.715099 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c5b6865-9b80-4832-8375-6c5edfe18311" containerName="controller-manager" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.715115 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c5b6865-9b80-4832-8375-6c5edfe18311" containerName="controller-manager" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.715231 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c5b6865-9b80-4832-8375-6c5edfe18311" containerName="controller-manager" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.715834 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.721501 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76df65d987-xclgl"] Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.754315 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-config\") pod \"5c5b6865-9b80-4832-8375-6c5edfe18311\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.754378 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c5b6865-9b80-4832-8375-6c5edfe18311-serving-cert\") pod \"5c5b6865-9b80-4832-8375-6c5edfe18311\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.754422 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n7bm\" (UniqueName: \"kubernetes.io/projected/5c5b6865-9b80-4832-8375-6c5edfe18311-kube-api-access-2n7bm\") pod \"5c5b6865-9b80-4832-8375-6c5edfe18311\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.754446 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-proxy-ca-bundles\") pod \"5c5b6865-9b80-4832-8375-6c5edfe18311\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.754531 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-client-ca\") pod \"5c5b6865-9b80-4832-8375-6c5edfe18311\" (UID: \"5c5b6865-9b80-4832-8375-6c5edfe18311\") " Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755405 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-client-ca" (OuterVolumeSpecName: "client-ca") pod "5c5b6865-9b80-4832-8375-6c5edfe18311" (UID: "5c5b6865-9b80-4832-8375-6c5edfe18311"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755438 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-config" (OuterVolumeSpecName: "config") pod "5c5b6865-9b80-4832-8375-6c5edfe18311" (UID: "5c5b6865-9b80-4832-8375-6c5edfe18311"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755638 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd87f5f0-c377-48e6-b58a-893c47cd88cb-serving-cert\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755610 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5c5b6865-9b80-4832-8375-6c5edfe18311" (UID: "5c5b6865-9b80-4832-8375-6c5edfe18311"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755688 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-config\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755821 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mz9l\" (UniqueName: \"kubernetes.io/projected/fd87f5f0-c377-48e6-b58a-893c47cd88cb-kube-api-access-6mz9l\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.755882 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-client-ca\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.756055 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-proxy-ca-bundles\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.756152 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.756168 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.756179 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c5b6865-9b80-4832-8375-6c5edfe18311-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.762208 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c5b6865-9b80-4832-8375-6c5edfe18311-kube-api-access-2n7bm" (OuterVolumeSpecName: "kube-api-access-2n7bm") pod "5c5b6865-9b80-4832-8375-6c5edfe18311" (UID: "5c5b6865-9b80-4832-8375-6c5edfe18311"). InnerVolumeSpecName "kube-api-access-2n7bm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.762501 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c5b6865-9b80-4832-8375-6c5edfe18311-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5c5b6865-9b80-4832-8375-6c5edfe18311" (UID: "5c5b6865-9b80-4832-8375-6c5edfe18311"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858018 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd87f5f0-c377-48e6-b58a-893c47cd88cb-serving-cert\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858089 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-config\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858136 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mz9l\" (UniqueName: \"kubernetes.io/projected/fd87f5f0-c377-48e6-b58a-893c47cd88cb-kube-api-access-6mz9l\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858171 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-client-ca\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858239 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-proxy-ca-bundles\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858305 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c5b6865-9b80-4832-8375-6c5edfe18311-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.858322 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n7bm\" (UniqueName: \"kubernetes.io/projected/5c5b6865-9b80-4832-8375-6c5edfe18311-kube-api-access-2n7bm\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.859273 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-client-ca\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.859386 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-proxy-ca-bundles\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: 
I1209 15:13:34.860476 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-config\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.863403 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd87f5f0-c377-48e6-b58a-893c47cd88cb-serving-cert\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:34 crc kubenswrapper[4716]: I1209 15:13:34.879490 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mz9l\" (UniqueName: \"kubernetes.io/projected/fd87f5f0-c377-48e6-b58a-893c47cd88cb-kube-api-access-6mz9l\") pod \"controller-manager-76df65d987-xclgl\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") " pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.034839 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.226269 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4acd966d-4bae-456f-bffd-9ad6533cc66d" path="/var/lib/kubelet/pods/4acd966d-4bae-456f-bffd-9ad6533cc66d/volumes" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.227747 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d90937ae-2446-41f4-94eb-e928c5d449de" path="/var/lib/kubelet/pods/d90937ae-2446-41f4-94eb-e928c5d449de/volumes" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.261666 4716 generic.go:334] "Generic (PLEG): container finished" podID="5c5b6865-9b80-4832-8375-6c5edfe18311" containerID="fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a" exitCode=0 Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.261819 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.262237 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" event={"ID":"5c5b6865-9b80-4832-8375-6c5edfe18311","Type":"ContainerDied","Data":"fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a"} Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.262273 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bb9db44d5-hvc56" event={"ID":"5c5b6865-9b80-4832-8375-6c5edfe18311","Type":"ContainerDied","Data":"282e65f48eedfbc6ac194b6b70461aa82ffc74abb903ab1343a0ee6b28654b22"} Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.262293 4716 scope.go:117] "RemoveContainer" containerID="fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.264261 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-64f96cc955-p6qqp_2b420902-4d80-4431-9459-99b901cc2a03/route-controller-manager/0.log" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.264300 4716 generic.go:334] "Generic (PLEG): container finished" podID="2b420902-4d80-4431-9459-99b901cc2a03" containerID="3479a6981ebe4361a997a234e2c9dcbd8a14ecbc83c41dd9e2a07c68d93e710d" exitCode=255 Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.264348 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" event={"ID":"2b420902-4d80-4431-9459-99b901cc2a03","Type":"ContainerDied","Data":"3479a6981ebe4361a997a234e2c9dcbd8a14ecbc83c41dd9e2a07c68d93e710d"} Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.266563 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.267661 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2831136b0590d3cc64d740566a5c119aa66c7c39d901bdc69baf24a260453221"} Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.279181 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-76df65d987-xclgl"] Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.306728 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-64f96cc955-p6qqp_2b420902-4d80-4431-9459-99b901cc2a03/route-controller-manager/0.log" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.306813 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.309800 4716 scope.go:117] "RemoveContainer" containerID="fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a" Dec 09 15:13:35 crc kubenswrapper[4716]: E1209 15:13:35.310218 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a\": container with ID starting with fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a not found: ID does not exist" containerID="fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.310259 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a"} err="failed to get container status \"fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a\": rpc error: code = NotFound desc = could not find container \"fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a\": container with ID starting with fbebbbd9f5e592a2cef05f9ef4c79a278dc22b4b627189bf1e6bd405c431320a not found: ID does not exist" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.317358 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-hvc56"] Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.321506 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-hvc56"] Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.364924 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh2v4\" (UniqueName: \"kubernetes.io/projected/2b420902-4d80-4431-9459-99b901cc2a03-kube-api-access-vh2v4\") pod \"2b420902-4d80-4431-9459-99b901cc2a03\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.365016 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2b420902-4d80-4431-9459-99b901cc2a03-serving-cert\") pod \"2b420902-4d80-4431-9459-99b901cc2a03\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.365051 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-config\") pod \"2b420902-4d80-4431-9459-99b901cc2a03\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.365135 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-client-ca\") pod \"2b420902-4d80-4431-9459-99b901cc2a03\" (UID: \"2b420902-4d80-4431-9459-99b901cc2a03\") " Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.366817 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-config" (OuterVolumeSpecName: "config") pod "2b420902-4d80-4431-9459-99b901cc2a03" (UID: "2b420902-4d80-4431-9459-99b901cc2a03"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.366985 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-client-ca" (OuterVolumeSpecName: "client-ca") pod "2b420902-4d80-4431-9459-99b901cc2a03" (UID: "2b420902-4d80-4431-9459-99b901cc2a03"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.370585 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b420902-4d80-4431-9459-99b901cc2a03-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2b420902-4d80-4431-9459-99b901cc2a03" (UID: "2b420902-4d80-4431-9459-99b901cc2a03"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.375938 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b420902-4d80-4431-9459-99b901cc2a03-kube-api-access-vh2v4" (OuterVolumeSpecName: "kube-api-access-vh2v4") pod "2b420902-4d80-4431-9459-99b901cc2a03" (UID: "2b420902-4d80-4431-9459-99b901cc2a03"). InnerVolumeSpecName "kube-api-access-vh2v4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.466967 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh2v4\" (UniqueName: \"kubernetes.io/projected/2b420902-4d80-4431-9459-99b901cc2a03-kube-api-access-vh2v4\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.467013 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2b420902-4d80-4431-9459-99b901cc2a03-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.467025 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:35 crc kubenswrapper[4716]: I1209 15:13:35.467035 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2b420902-4d80-4431-9459-99b901cc2a03-client-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.275985 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" event={"ID":"fd87f5f0-c377-48e6-b58a-893c47cd88cb","Type":"ContainerStarted","Data":"033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba"} Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.276462 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" event={"ID":"fd87f5f0-c377-48e6-b58a-893c47cd88cb","Type":"ContainerStarted","Data":"3537f526db738666860d60e029d70459915a2f81dab7bad07bc1e44957b1e3a9"} Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.277108 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.279025 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-64f96cc955-p6qqp_2b420902-4d80-4431-9459-99b901cc2a03/route-controller-manager/0.log" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.279096 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" event={"ID":"2b420902-4d80-4431-9459-99b901cc2a03","Type":"ContainerDied","Data":"2edc60e45ff2334073d0b224eb91e24d033f92663d22f7a83e8b7a75061cb75b"} Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.279136 4716 scope.go:117] "RemoveContainer" containerID="3479a6981ebe4361a997a234e2c9dcbd8a14ecbc83c41dd9e2a07c68d93e710d" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.279255 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.287582 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.305359 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" podStartSLOduration=3.305343192 podStartE2EDuration="3.305343192s" podCreationTimestamp="2025-12-09 15:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:36.301772696 +0000 UTC m=+303.456516694" watchObservedRunningTime="2025-12-09 15:13:36.305343192 +0000 UTC m=+303.460087180" Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.340253 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp"] Dec 09 15:13:36 crc kubenswrapper[4716]: I1209 15:13:36.344363 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-p6qqp"] Dec 09 15:13:36 crc kubenswrapper[4716]: E1209 15:13:36.423068 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice/crio-c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice\": RecentStats: unable to find data in memory cache]" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.222830 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b420902-4d80-4431-9459-99b901cc2a03" path="/var/lib/kubelet/pods/2b420902-4d80-4431-9459-99b901cc2a03/volumes" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.223413 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c5b6865-9b80-4832-8375-6c5edfe18311" path="/var/lib/kubelet/pods/5c5b6865-9b80-4832-8375-6c5edfe18311/volumes" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.371509 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"] Dec 09 15:13:37 crc kubenswrapper[4716]: E1209 15:13:37.372353 4716 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2b420902-4d80-4431-9459-99b901cc2a03" containerName="route-controller-manager" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.372435 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b420902-4d80-4431-9459-99b901cc2a03" containerName="route-controller-manager" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.372591 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b420902-4d80-4431-9459-99b901cc2a03" containerName="route-controller-manager" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.373159 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.375414 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.377517 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.378669 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.378790 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.378907 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.379006 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.383893 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"] Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.400206 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3501701f-68b4-4e0c-8bdc-2b005b838e5c-serving-cert\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.400282 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvb8s\" (UniqueName: \"kubernetes.io/projected/3501701f-68b4-4e0c-8bdc-2b005b838e5c-kube-api-access-bvb8s\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.400411 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-config\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.401284 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-client-ca\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.502566 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-client-ca\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.502671 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3501701f-68b4-4e0c-8bdc-2b005b838e5c-serving-cert\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.502703 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvb8s\" (UniqueName: \"kubernetes.io/projected/3501701f-68b4-4e0c-8bdc-2b005b838e5c-kube-api-access-bvb8s\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.502767 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-config\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.503878 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-client-ca\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.504497 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-config\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.518036 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3501701f-68b4-4e0c-8bdc-2b005b838e5c-serving-cert\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.522109 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvb8s\" (UniqueName: 
\"kubernetes.io/projected/3501701f-68b4-4e0c-8bdc-2b005b838e5c-kube-api-access-bvb8s\") pod \"route-controller-manager-775b55bb67-rf79d\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") " pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.698837 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:37 crc kubenswrapper[4716]: I1209 15:13:37.944580 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"] Dec 09 15:13:37 crc kubenswrapper[4716]: W1209 15:13:37.966940 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3501701f_68b4_4e0c_8bdc_2b005b838e5c.slice/crio-7da97f2d32b4dd950f222e9cff785357f2a99a45ae1d01a54151bc01ca483aed WatchSource:0}: Error finding container 7da97f2d32b4dd950f222e9cff785357f2a99a45ae1d01a54151bc01ca483aed: Status 404 returned error can't find the container with id 7da97f2d32b4dd950f222e9cff785357f2a99a45ae1d01a54151bc01ca483aed Dec 09 15:13:38 crc kubenswrapper[4716]: I1209 15:13:38.308338 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" event={"ID":"3501701f-68b4-4e0c-8bdc-2b005b838e5c","Type":"ContainerStarted","Data":"550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97"} Dec 09 15:13:38 crc kubenswrapper[4716]: I1209 15:13:38.308409 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" event={"ID":"3501701f-68b4-4e0c-8bdc-2b005b838e5c","Type":"ContainerStarted","Data":"7da97f2d32b4dd950f222e9cff785357f2a99a45ae1d01a54151bc01ca483aed"} Dec 09 15:13:38 crc kubenswrapper[4716]: I1209 15:13:38.308797 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:38 crc kubenswrapper[4716]: I1209 15:13:38.338661 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" podStartSLOduration=5.338639533 podStartE2EDuration="5.338639533s" podCreationTimestamp="2025-12-09 15:13:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:38.337474978 +0000 UTC m=+305.492218966" watchObservedRunningTime="2025-12-09 15:13:38.338639533 +0000 UTC m=+305.493383521" Dec 09 15:13:38 crc kubenswrapper[4716]: I1209 15:13:38.647268 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" Dec 09 15:13:40 crc kubenswrapper[4716]: I1209 15:13:40.333219 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:13:44 crc kubenswrapper[4716]: I1209 15:13:44.071383 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:13:44 crc kubenswrapper[4716]: I1209 15:13:44.077027 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:13:44 crc kubenswrapper[4716]: I1209 15:13:44.354216 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 09 15:13:46 crc kubenswrapper[4716]: E1209 15:13:46.559709 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice/crio-c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790\": RecentStats: unable to find data in memory cache]" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.519041 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hgf8l"] Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.520511 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.591197 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hgf8l"] Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.618946 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-registry-tls\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.619325 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-bound-sa-token\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.619476 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.619599 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13481ff2-81a1-43bd-8d71-79e724bdfd35-trusted-ca\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.619714 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/13481ff2-81a1-43bd-8d71-79e724bdfd35-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.619950 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/13481ff2-81a1-43bd-8d71-79e724bdfd35-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.620151 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/13481ff2-81a1-43bd-8d71-79e724bdfd35-registry-certificates\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.620223 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl9md\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-kube-api-access-rl9md\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.688569 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.721732 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-registry-tls\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.722453 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-bound-sa-token\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.722586 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13481ff2-81a1-43bd-8d71-79e724bdfd35-trusted-ca\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.722706 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/13481ff2-81a1-43bd-8d71-79e724bdfd35-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.722829 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/13481ff2-81a1-43bd-8d71-79e724bdfd35-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.722941 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/13481ff2-81a1-43bd-8d71-79e724bdfd35-registry-certificates\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.723032 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl9md\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-kube-api-access-rl9md\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.723417 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/13481ff2-81a1-43bd-8d71-79e724bdfd35-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.724797 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/13481ff2-81a1-43bd-8d71-79e724bdfd35-registry-certificates\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.725530 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13481ff2-81a1-43bd-8d71-79e724bdfd35-trusted-ca\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.734013 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-registry-tls\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.734359 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/13481ff2-81a1-43bd-8d71-79e724bdfd35-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.747361 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl9md\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-kube-api-access-rl9md\") pod \"image-registry-66df7c8f76-hgf8l\" 
(UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.756540 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13481ff2-81a1-43bd-8d71-79e724bdfd35-bound-sa-token\") pod \"image-registry-66df7c8f76-hgf8l\" (UID: \"13481ff2-81a1-43bd-8d71-79e724bdfd35\") " pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:52 crc kubenswrapper[4716]: I1209 15:13:52.847039 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:53 crc kubenswrapper[4716]: I1209 15:13:53.374193 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hgf8l"] Dec 09 15:13:53 crc kubenswrapper[4716]: W1209 15:13:53.378223 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13481ff2_81a1_43bd_8d71_79e724bdfd35.slice/crio-24726a6f48b9222f2ef269f93fb83aac97f026de2451967b2d3b9c4370f6148b WatchSource:0}: Error finding container 24726a6f48b9222f2ef269f93fb83aac97f026de2451967b2d3b9c4370f6148b: Status 404 returned error can't find the container with id 24726a6f48b9222f2ef269f93fb83aac97f026de2451967b2d3b9c4370f6148b Dec 09 15:13:53 crc kubenswrapper[4716]: I1209 15:13:53.425149 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" event={"ID":"13481ff2-81a1-43bd-8d71-79e724bdfd35","Type":"ContainerStarted","Data":"24726a6f48b9222f2ef269f93fb83aac97f026de2451967b2d3b9c4370f6148b"} Dec 09 15:13:54 crc kubenswrapper[4716]: I1209 15:13:54.434640 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" event={"ID":"13481ff2-81a1-43bd-8d71-79e724bdfd35","Type":"ContainerStarted","Data":"d1e43e582a5d950e3d17dad3c05a0c3cd9554d49991206d2fb8140bb6e2bdf74"} Dec 09 15:13:54 crc kubenswrapper[4716]: I1209 15:13:54.435166 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:13:54 crc kubenswrapper[4716]: I1209 15:13:54.454353 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" podStartSLOduration=2.454329449 podStartE2EDuration="2.454329449s" podCreationTimestamp="2025-12-09 15:13:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:13:54.45336308 +0000 UTC m=+321.608107078" watchObservedRunningTime="2025-12-09 15:13:54.454329449 +0000 UTC m=+321.609073437" Dec 09 15:13:56 crc kubenswrapper[4716]: E1209 15:13:56.704902 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice/crio-c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice\": RecentStats: unable to find data in memory cache]" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.418240 4716 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/certified-operators-6grj2"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.419386 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6grj2" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="registry-server" containerID="cri-o://6bd0c4b854e56de90699d8d0d76ab06929112419f2d0565228fc83584b40774f" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.434488 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.434906 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bfn6b" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="registry-server" containerID="cri-o://9745c35448b658c3e07a00924fdb91a8cec39e5bef15b139aafc23ba470eba09" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.446425 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4zqlr"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.446837 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4zqlr" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="registry-server" containerID="cri-o://7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.452040 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f8rnc"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.452386 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f8rnc" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="registry-server" containerID="cri-o://50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.471484 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2rcsb"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.471852 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator" containerID="cri-o://f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.484450 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dsxtv"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.484747 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dsxtv" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="registry-server" containerID="cri-o://f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.539342 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g5h2p"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.559213 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8bgn"] 
Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.559995 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n8bgn" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="registry-server" containerID="cri-o://63be79a46108bbcee26991617a2f26a64e81eaa15ba40da0e4b588940190ff4f" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.564119 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.569157 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5jwpg"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.569653 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5jwpg" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="registry-server" containerID="cri-o://2ce17d4f6f5bb72b5cc3c0e8133d8f4194518093419d32bc323a823c5874abbc" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.577869 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g5h2p"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.610609 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9197123c-d444-4d8e-94cd-8a9f61317430-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.612798 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdcjm\" (UniqueName: \"kubernetes.io/projected/9197123c-d444-4d8e-94cd-8a9f61317430-kube-api-access-tdcjm\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.611150 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v42h6"] Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.622306 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v42h6" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="registry-server" containerID="cri-o://86ec669b334f1f8428099c358a5874f0c67dd1eaec4ceb5b5bd3b8f03df3cd7c" gracePeriod=30 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.622896 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9197123c-d444-4d8e-94cd-8a9f61317430-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.644975 4716 generic.go:334] "Generic (PLEG): container finished" podID="02be1451-6780-479c-ab94-37503fea3645" containerID="6bd0c4b854e56de90699d8d0d76ab06929112419f2d0565228fc83584b40774f" exitCode=0 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.645124 4716 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerDied","Data":"6bd0c4b854e56de90699d8d0d76ab06929112419f2d0565228fc83584b40774f"} Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.659224 4716 generic.go:334] "Generic (PLEG): container finished" podID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerID="9745c35448b658c3e07a00924fdb91a8cec39e5bef15b139aafc23ba470eba09" exitCode=0 Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.659283 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerDied","Data":"9745c35448b658c3e07a00924fdb91a8cec39e5bef15b139aafc23ba470eba09"} Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.724452 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9197123c-d444-4d8e-94cd-8a9f61317430-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.724533 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdcjm\" (UniqueName: \"kubernetes.io/projected/9197123c-d444-4d8e-94cd-8a9f61317430-kube-api-access-tdcjm\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.724569 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9197123c-d444-4d8e-94cd-8a9f61317430-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.725880 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9197123c-d444-4d8e-94cd-8a9f61317430-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.737637 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9197123c-d444-4d8e-94cd-8a9f61317430-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:02 crc kubenswrapper[4716]: I1209 15:14:02.759153 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdcjm\" (UniqueName: \"kubernetes.io/projected/9197123c-d444-4d8e-94cd-8a9f61317430-kube-api-access-tdcjm\") pod \"marketplace-operator-79b997595-g5h2p\" (UID: \"9197123c-d444-4d8e-94cd-8a9f61317430\") " pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.179868 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.188261 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dsxtv" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.231916 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-utilities\") pod \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.231964 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-catalog-content\") pod \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.231996 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ms8q9\" (UniqueName: \"kubernetes.io/projected/1fc78b58-085f-42f7-bec5-b28d0d2bc191-kube-api-access-ms8q9\") pod \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\" (UID: \"1fc78b58-085f-42f7-bec5-b28d0d2bc191\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.244190 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-utilities" (OuterVolumeSpecName: "utilities") pod "1fc78b58-085f-42f7-bec5-b28d0d2bc191" (UID: "1fc78b58-085f-42f7-bec5-b28d0d2bc191"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.257535 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fc78b58-085f-42f7-bec5-b28d0d2bc191-kube-api-access-ms8q9" (OuterVolumeSpecName: "kube-api-access-ms8q9") pod "1fc78b58-085f-42f7-bec5-b28d0d2bc191" (UID: "1fc78b58-085f-42f7-bec5-b28d0d2bc191"). InnerVolumeSpecName "kube-api-access-ms8q9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.259610 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4zqlr"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.260821 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1fc78b58-085f-42f7-bec5-b28d0d2bc191" (UID: "1fc78b58-085f-42f7-bec5-b28d0d2bc191"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.334451 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.334535 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1fc78b58-085f-42f7-bec5-b28d0d2bc191-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.334551 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ms8q9\" (UniqueName: \"kubernetes.io/projected/1fc78b58-085f-42f7-bec5-b28d0d2bc191-kube-api-access-ms8q9\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.376391 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.383270 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.386744 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.389562 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.416228 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.435813 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-trusted-ca\") pod \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.435913 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-catalog-content\") pod \"1cf42d24-d670-433b-8a60-11e6cadde0dd\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.435982 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-operator-metrics\") pod \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436002 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jq5p\" (UniqueName: \"kubernetes.io/projected/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-kube-api-access-5jq5p\") pod \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\" (UID: \"a25d26a9-7c6b-455e-9b8d-cc2fba08c576\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436075 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-utilities\") pod \"6e8a9e79-4b63-4c3f-968c-9524e682af80\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436101 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-catalog-content\") pod \"02be1451-6780-479c-ab94-37503fea3645\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436117 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpcnm\" (UniqueName: \"kubernetes.io/projected/1cf42d24-d670-433b-8a60-11e6cadde0dd-kube-api-access-hpcnm\") pod \"1cf42d24-d670-433b-8a60-11e6cadde0dd\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436135 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-catalog-content\") pod \"6e8a9e79-4b63-4c3f-968c-9524e682af80\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436162 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hg5hh\" (UniqueName: \"kubernetes.io/projected/6e8a9e79-4b63-4c3f-968c-9524e682af80-kube-api-access-hg5hh\") pod \"6e8a9e79-4b63-4c3f-968c-9524e682af80\" (UID: \"6e8a9e79-4b63-4c3f-968c-9524e682af80\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436197 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-utilities\") pod \"1cf42d24-d670-433b-8a60-11e6cadde0dd\" (UID: \"1cf42d24-d670-433b-8a60-11e6cadde0dd\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436224 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-utilities\") pod \"02be1451-6780-479c-ab94-37503fea3645\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436251 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-catalog-content\") pod \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436287 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxb4v\" (UniqueName: \"kubernetes.io/projected/02be1451-6780-479c-ab94-37503fea3645-kube-api-access-wxb4v\") pod \"02be1451-6780-479c-ab94-37503fea3645\" (UID: \"02be1451-6780-479c-ab94-37503fea3645\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436322 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-utilities\") pod \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.436366 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdlpn\" (UniqueName: \"kubernetes.io/projected/3f60d900-cfcc-4840-8f6b-cdcb043a510b-kube-api-access-mdlpn\") pod \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\" (UID: \"3f60d900-cfcc-4840-8f6b-cdcb043a510b\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.437690 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.438534 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-utilities" (OuterVolumeSpecName: "utilities") pod "02be1451-6780-479c-ab94-37503fea3645" (UID: "02be1451-6780-479c-ab94-37503fea3645"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.438577 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-utilities" (OuterVolumeSpecName: "utilities") pod "6e8a9e79-4b63-4c3f-968c-9524e682af80" (UID: "6e8a9e79-4b63-4c3f-968c-9524e682af80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.442424 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-utilities" (OuterVolumeSpecName: "utilities") pod "1cf42d24-d670-433b-8a60-11e6cadde0dd" (UID: "1cf42d24-d670-433b-8a60-11e6cadde0dd"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.443724 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-utilities" (OuterVolumeSpecName: "utilities") pod "3f60d900-cfcc-4840-8f6b-cdcb043a510b" (UID: "3f60d900-cfcc-4840-8f6b-cdcb043a510b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.450673 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e8a9e79-4b63-4c3f-968c-9524e682af80-kube-api-access-hg5hh" (OuterVolumeSpecName: "kube-api-access-hg5hh") pod "6e8a9e79-4b63-4c3f-968c-9524e682af80" (UID: "6e8a9e79-4b63-4c3f-968c-9524e682af80"). InnerVolumeSpecName "kube-api-access-hg5hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.451129 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f60d900-cfcc-4840-8f6b-cdcb043a510b-kube-api-access-mdlpn" (OuterVolumeSpecName: "kube-api-access-mdlpn") pod "3f60d900-cfcc-4840-8f6b-cdcb043a510b" (UID: "3f60d900-cfcc-4840-8f6b-cdcb043a510b"). InnerVolumeSpecName "kube-api-access-mdlpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.453443 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02be1451-6780-479c-ab94-37503fea3645-kube-api-access-wxb4v" (OuterVolumeSpecName: "kube-api-access-wxb4v") pod "02be1451-6780-479c-ab94-37503fea3645" (UID: "02be1451-6780-479c-ab94-37503fea3645"). InnerVolumeSpecName "kube-api-access-wxb4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.453608 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "a25d26a9-7c6b-455e-9b8d-cc2fba08c576" (UID: "a25d26a9-7c6b-455e-9b8d-cc2fba08c576"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.456938 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cf42d24-d670-433b-8a60-11e6cadde0dd-kube-api-access-hpcnm" (OuterVolumeSpecName: "kube-api-access-hpcnm") pod "1cf42d24-d670-433b-8a60-11e6cadde0dd" (UID: "1cf42d24-d670-433b-8a60-11e6cadde0dd"). InnerVolumeSpecName "kube-api-access-hpcnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.462445 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-kube-api-access-5jq5p" (OuterVolumeSpecName: "kube-api-access-5jq5p") pod "a25d26a9-7c6b-455e-9b8d-cc2fba08c576" (UID: "a25d26a9-7c6b-455e-9b8d-cc2fba08c576"). InnerVolumeSpecName "kube-api-access-5jq5p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.470071 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "a25d26a9-7c6b-455e-9b8d-cc2fba08c576" (UID: "a25d26a9-7c6b-455e-9b8d-cc2fba08c576"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539184 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxb4v\" (UniqueName: \"kubernetes.io/projected/02be1451-6780-479c-ab94-37503fea3645-kube-api-access-wxb4v\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539230 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539273 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdlpn\" (UniqueName: \"kubernetes.io/projected/3f60d900-cfcc-4840-8f6b-cdcb043a510b-kube-api-access-mdlpn\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539289 4716 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539301 4716 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539315 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jq5p\" (UniqueName: \"kubernetes.io/projected/a25d26a9-7c6b-455e-9b8d-cc2fba08c576-kube-api-access-5jq5p\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539362 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539377 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hg5hh\" (UniqueName: \"kubernetes.io/projected/6e8a9e79-4b63-4c3f-968c-9524e682af80-kube-api-access-hg5hh\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539390 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpcnm\" (UniqueName: \"kubernetes.io/projected/1cf42d24-d670-433b-8a60-11e6cadde0dd-kube-api-access-hpcnm\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539438 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.539453 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 
15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.541733 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3f60d900-cfcc-4840-8f6b-cdcb043a510b" (UID: "3f60d900-cfcc-4840-8f6b-cdcb043a510b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.557871 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e8a9e79-4b63-4c3f-968c-9524e682af80" (UID: "6e8a9e79-4b63-4c3f-968c-9524e682af80"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.571113 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02be1451-6780-479c-ab94-37503fea3645" (UID: "02be1451-6780-479c-ab94-37503fea3645"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.595607 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1cf42d24-d670-433b-8a60-11e6cadde0dd" (UID: "1cf42d24-d670-433b-8a60-11e6cadde0dd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.641190 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1cf42d24-d670-433b-8a60-11e6cadde0dd-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.641233 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02be1451-6780-479c-ab94-37503fea3645-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.641245 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8a9e79-4b63-4c3f-968c-9524e682af80-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.641258 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f60d900-cfcc-4840-8f6b-cdcb043a510b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.677290 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfn6b" event={"ID":"1cf42d24-d670-433b-8a60-11e6cadde0dd","Type":"ContainerDied","Data":"66ba1daa9663c12a71673817977146a8bdb57ebfb215e5a2dddd3b4235f1963d"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.677342 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-bfn6b" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.677376 4716 scope.go:117] "RemoveContainer" containerID="9745c35448b658c3e07a00924fdb91a8cec39e5bef15b139aafc23ba470eba09" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.685569 4716 generic.go:334] "Generic (PLEG): container finished" podID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerID="f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.685998 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" event={"ID":"a25d26a9-7c6b-455e-9b8d-cc2fba08c576","Type":"ContainerDied","Data":"f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.686052 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" event={"ID":"a25d26a9-7c6b-455e-9b8d-cc2fba08c576","Type":"ContainerDied","Data":"ec635134830b3623a11ff2c30c911857e13239d4c777b5248341d736a634afc0"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.686202 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2rcsb" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.693279 4716 generic.go:334] "Generic (PLEG): container finished" podID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerID="7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.693374 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4zqlr" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.693400 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerDied","Data":"7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.694019 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4zqlr" event={"ID":"3f60d900-cfcc-4840-8f6b-cdcb043a510b","Type":"ContainerDied","Data":"19decfa94e89fd28925b7d57f995842e3e2e383f71554199d025c9064a694f85"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.705266 4716 generic.go:334] "Generic (PLEG): container finished" podID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerID="63be79a46108bbcee26991617a2f26a64e81eaa15ba40da0e4b588940190ff4f" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.705355 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerDied","Data":"63be79a46108bbcee26991617a2f26a64e81eaa15ba40da0e4b588940190ff4f"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.707729 4716 generic.go:334] "Generic (PLEG): container finished" podID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerID="86ec669b334f1f8428099c358a5874f0c67dd1eaec4ceb5b5bd3b8f03df3cd7c" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.707825 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" 
event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerDied","Data":"86ec669b334f1f8428099c358a5874f0c67dd1eaec4ceb5b5bd3b8f03df3cd7c"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.710701 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6grj2" event={"ID":"02be1451-6780-479c-ab94-37503fea3645","Type":"ContainerDied","Data":"dfd9e4ea3313ea4f7ef2ea895327121a5ebc8113f1cf595354434be6c3b40514"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.710854 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6grj2" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.723480 4716 generic.go:334] "Generic (PLEG): container finished" podID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerID="2ce17d4f6f5bb72b5cc3c0e8133d8f4194518093419d32bc323a823c5874abbc" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.723657 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerDied","Data":"2ce17d4f6f5bb72b5cc3c0e8133d8f4194518093419d32bc323a823c5874abbc"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.727447 4716 generic.go:334] "Generic (PLEG): container finished" podID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerID="f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.727673 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerDied","Data":"f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.727732 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dsxtv" event={"ID":"1fc78b58-085f-42f7-bec5-b28d0d2bc191","Type":"ContainerDied","Data":"857846db4026a0de66208f6f8dbbc4a5061d74172731656e963801330af96ba3"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.728019 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dsxtv" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.731421 4716 generic.go:334] "Generic (PLEG): container finished" podID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerID="50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea" exitCode=0 Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.731463 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerDied","Data":"50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.731499 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f8rnc" event={"ID":"6e8a9e79-4b63-4c3f-968c-9524e682af80","Type":"ContainerDied","Data":"c9004d01c8dfb77b44e0398c1ed3282aa30efc25c1422a52b4492762d94f6d2a"} Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.731440 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f8rnc" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.751314 4716 scope.go:117] "RemoveContainer" containerID="7ccb29d9eaaff5cc12554a78c6f582140b6cd27eb15a06f2e24300f273d9ed0e" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.758648 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8bgn" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.771285 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.790180 4716 scope.go:117] "RemoveContainer" containerID="1fd185a0a7bdfe9dc50f5351db2a1503ab6d925647dc327bf1092ecda3f3cfdc" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.801022 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4zqlr"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.805070 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v42h6" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.809049 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4zqlr"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.821249 4716 scope.go:117] "RemoveContainer" containerID="f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.825811 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.831755 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bfn6b"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844229 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdx22\" (UniqueName: \"kubernetes.io/projected/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-kube-api-access-kdx22\") pod \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844317 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpm7d\" (UniqueName: \"kubernetes.io/projected/85d4a173-8cf0-4f51-b713-6a8624461b61-kube-api-access-fpm7d\") pod \"85d4a173-8cf0-4f51-b713-6a8624461b61\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844375 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq4hz\" (UniqueName: \"kubernetes.io/projected/07e1c69c-8b33-4342-8632-010554dfd1d5-kube-api-access-cq4hz\") pod \"07e1c69c-8b33-4342-8632-010554dfd1d5\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844417 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-utilities\") pod \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844476 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-utilities\") pod \"85d4a173-8cf0-4f51-b713-6a8624461b61\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844507 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-utilities\") pod \"07e1c69c-8b33-4342-8632-010554dfd1d5\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844553 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-catalog-content\") pod \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\" (UID: \"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844584 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-catalog-content\") pod \"85d4a173-8cf0-4f51-b713-6a8624461b61\" (UID: \"85d4a173-8cf0-4f51-b713-6a8624461b61\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.844606 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-catalog-content\") pod \"07e1c69c-8b33-4342-8632-010554dfd1d5\" (UID: \"07e1c69c-8b33-4342-8632-010554dfd1d5\") " Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.848444 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-utilities" (OuterVolumeSpecName: "utilities") pod "85d4a173-8cf0-4f51-b713-6a8624461b61" (UID: "85d4a173-8cf0-4f51-b713-6a8624461b61"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.848891 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-utilities" (OuterVolumeSpecName: "utilities") pod "07e1c69c-8b33-4342-8632-010554dfd1d5" (UID: "07e1c69c-8b33-4342-8632-010554dfd1d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.849544 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-utilities" (OuterVolumeSpecName: "utilities") pod "e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" (UID: "e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.854117 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07e1c69c-8b33-4342-8632-010554dfd1d5-kube-api-access-cq4hz" (OuterVolumeSpecName: "kube-api-access-cq4hz") pod "07e1c69c-8b33-4342-8632-010554dfd1d5" (UID: "07e1c69c-8b33-4342-8632-010554dfd1d5"). InnerVolumeSpecName "kube-api-access-cq4hz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.856923 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85d4a173-8cf0-4f51-b713-6a8624461b61-kube-api-access-fpm7d" (OuterVolumeSpecName: "kube-api-access-fpm7d") pod "85d4a173-8cf0-4f51-b713-6a8624461b61" (UID: "85d4a173-8cf0-4f51-b713-6a8624461b61"). InnerVolumeSpecName "kube-api-access-fpm7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.856933 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-kube-api-access-kdx22" (OuterVolumeSpecName: "kube-api-access-kdx22") pod "e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" (UID: "e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3"). InnerVolumeSpecName "kube-api-access-kdx22". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.860676 4716 scope.go:117] "RemoveContainer" containerID="977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.874602 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6grj2"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.891117 4716 scope.go:117] "RemoveContainer" containerID="f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf" Dec 09 15:14:03 crc kubenswrapper[4716]: E1209 15:14:03.891821 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf\": container with ID starting with f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf not found: ID does not exist" containerID="f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.892207 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" (UID: "e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.892014 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf"} err="failed to get container status \"f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf\": rpc error: code = NotFound desc = could not find container \"f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf\": container with ID starting with f41778fd6b384a6e965b5d23d789a31b9db3e35ff2f02881f43e7600ef01a0cf not found: ID does not exist" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.895915 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6grj2"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.898197 4716 scope.go:117] "RemoveContainer" containerID="977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f" Dec 09 15:14:03 crc kubenswrapper[4716]: E1209 15:14:03.899110 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f\": container with ID starting with 977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f not found: ID does not exist" containerID="977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.899273 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f"} err="failed to get container status \"977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f\": rpc error: code = NotFound desc = could not find container \"977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f\": container with ID starting with 977dad93384f53ac03b0809aba7382612d2054b587d18b1af2f99c77b79b772f not found: ID does not exist" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.899450 4716 scope.go:117] "RemoveContainer" containerID="7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.905604 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2rcsb"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.927486 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2rcsb"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.929865 4716 scope.go:117] "RemoveContainer" containerID="f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.941411 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dsxtv"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.947293 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.947811 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdx22\" (UniqueName: \"kubernetes.io/projected/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-kube-api-access-kdx22\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: 
I1209 15:14:03.947912 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpm7d\" (UniqueName: \"kubernetes.io/projected/85d4a173-8cf0-4f51-b713-6a8624461b61-kube-api-access-fpm7d\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.947973 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq4hz\" (UniqueName: \"kubernetes.io/projected/07e1c69c-8b33-4342-8632-010554dfd1d5-kube-api-access-cq4hz\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.948044 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.948129 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.948195 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.948356 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dsxtv"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.950603 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f8rnc"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.955853 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f8rnc"] Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.970815 4716 scope.go:117] "RemoveContainer" containerID="f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c" Dec 09 15:14:03 crc kubenswrapper[4716]: I1209 15:14:03.988176 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g5h2p"] Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.005882 4716 scope.go:117] "RemoveContainer" containerID="7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.007026 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073\": container with ID starting with 7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073 not found: ID does not exist" containerID="7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.007058 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073"} err="failed to get container status \"7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073\": rpc error: code = NotFound desc = could not find container \"7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073\": container with ID starting with 7df4f90351c5e5d9d3c57257cc5fd6557a31cc1cd13daf6f75e25277e4aeb073 not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.007086 4716 scope.go:117] "RemoveContainer" 
containerID="f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.007697 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759\": container with ID starting with f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759 not found: ID does not exist" containerID="f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.007761 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759"} err="failed to get container status \"f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759\": rpc error: code = NotFound desc = could not find container \"f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759\": container with ID starting with f9a1f90c369506af19e9df3341885b4142479a03d6dafbc1dfce6dcb44927759 not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.007807 4716 scope.go:117] "RemoveContainer" containerID="f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.009514 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c\": container with ID starting with f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c not found: ID does not exist" containerID="f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.009548 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c"} err="failed to get container status \"f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c\": rpc error: code = NotFound desc = could not find container \"f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c\": container with ID starting with f58675f8901d19bd243d49e43a908d81d62de58bf83729d3fcaec7285722210c not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.009567 4716 scope.go:117] "RemoveContainer" containerID="6bd0c4b854e56de90699d8d0d76ab06929112419f2d0565228fc83584b40774f" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.027890 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07e1c69c-8b33-4342-8632-010554dfd1d5" (UID: "07e1c69c-8b33-4342-8632-010554dfd1d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.031696 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85d4a173-8cf0-4f51-b713-6a8624461b61" (UID: "85d4a173-8cf0-4f51-b713-6a8624461b61"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.049821 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85d4a173-8cf0-4f51-b713-6a8624461b61-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.049855 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07e1c69c-8b33-4342-8632-010554dfd1d5-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.055784 4716 scope.go:117] "RemoveContainer" containerID="c83dc178d19f9be51a0ccd17e650554adbcc5b4dc2734efcc78c92b4436a87f2" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.079258 4716 scope.go:117] "RemoveContainer" containerID="22f283856a04138e7bc6da28c0e08c5315f3ff0a5122ff018763f120f7469851" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.100724 4716 scope.go:117] "RemoveContainer" containerID="f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.124320 4716 scope.go:117] "RemoveContainer" containerID="2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.143526 4716 scope.go:117] "RemoveContainer" containerID="613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.163421 4716 scope.go:117] "RemoveContainer" containerID="f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.164092 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714\": container with ID starting with f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714 not found: ID does not exist" containerID="f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.164125 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714"} err="failed to get container status \"f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714\": rpc error: code = NotFound desc = could not find container \"f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714\": container with ID starting with f509522db616bbeae8680f6d8f9a149d7b756f08ace1a4caab509ac375d07714 not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.164178 4716 scope.go:117] "RemoveContainer" containerID="2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.164567 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d\": container with ID starting with 2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d not found: ID does not exist" containerID="2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.165138 4716 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d"} err="failed to get container status \"2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d\": rpc error: code = NotFound desc = could not find container \"2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d\": container with ID starting with 2a6dfc84be839891caa06b48a43ab1b9fe99052d105fae7d6d529186f668827d not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.165152 4716 scope.go:117] "RemoveContainer" containerID="613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.165499 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f\": container with ID starting with 613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f not found: ID does not exist" containerID="613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.165525 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f"} err="failed to get container status \"613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f\": rpc error: code = NotFound desc = could not find container \"613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f\": container with ID starting with 613f61deca0f4f584e3a978a11ee85ca0810bfe2a53d8c9fcf8f81a05855d94f not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.165544 4716 scope.go:117] "RemoveContainer" containerID="50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.182820 4716 scope.go:117] "RemoveContainer" containerID="9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.205235 4716 scope.go:117] "RemoveContainer" containerID="8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.222250 4716 scope.go:117] "RemoveContainer" containerID="50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.222921 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea\": container with ID starting with 50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea not found: ID does not exist" containerID="50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.222972 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea"} err="failed to get container status \"50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea\": rpc error: code = NotFound desc = could not find container \"50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea\": container with ID starting with 50d5c3cb77dda1a27c7bf642211449fd91e602a0d9c72c62dfc0325a21b9ddea not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.223010 4716 
scope.go:117] "RemoveContainer" containerID="9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.223409 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf\": container with ID starting with 9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf not found: ID does not exist" containerID="9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.223438 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf"} err="failed to get container status \"9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf\": rpc error: code = NotFound desc = could not find container \"9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf\": container with ID starting with 9616f3e8fbf6246fddfb39c3e5cd0cec33371bae8dee5c048a9d3c059c0c08bf not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.223458 4716 scope.go:117] "RemoveContainer" containerID="8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552" Dec 09 15:14:04 crc kubenswrapper[4716]: E1209 15:14:04.223773 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552\": container with ID starting with 8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552 not found: ID does not exist" containerID="8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.223797 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552"} err="failed to get container status \"8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552\": rpc error: code = NotFound desc = could not find container \"8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552\": container with ID starting with 8c31a4bb36f079f1bcb1abd04e2f3ceb7cd040d82eed56de2441195e99253552 not found: ID does not exist" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.742276 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" event={"ID":"9197123c-d444-4d8e-94cd-8a9f61317430","Type":"ContainerStarted","Data":"e1597c9452650a2739393bfeff7a6411051dae1daf52689f12f87f4a28bdb101"} Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.742338 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" event={"ID":"9197123c-d444-4d8e-94cd-8a9f61317430","Type":"ContainerStarted","Data":"ed1f5f685f078824f7a29f7d4e53b943b3366bb3967082d88da811e5dc39a94a"} Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.742576 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.745554 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.747485 4716 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5jwpg" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.749599 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5jwpg" event={"ID":"85d4a173-8cf0-4f51-b713-6a8624461b61","Type":"ContainerDied","Data":"5d74736631f34e73e7e1b3643abf1a43c3aa9b29b96beb775678339392b3f07b"} Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.749675 4716 scope.go:117] "RemoveContainer" containerID="2ce17d4f6f5bb72b5cc3c0e8133d8f4194518093419d32bc323a823c5874abbc" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.754251 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n8bgn" event={"ID":"e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3","Type":"ContainerDied","Data":"e83b6d65ff49cab3387081debbc0a92ab7490a24706db0977eba245a286857bc"} Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.754429 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n8bgn" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.772879 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-g5h2p" podStartSLOduration=2.772842293 podStartE2EDuration="2.772842293s" podCreationTimestamp="2025-12-09 15:14:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:14:04.768086543 +0000 UTC m=+331.922830691" watchObservedRunningTime="2025-12-09 15:14:04.772842293 +0000 UTC m=+331.927586281" Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.776633 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v42h6" event={"ID":"07e1c69c-8b33-4342-8632-010554dfd1d5","Type":"ContainerDied","Data":"8791dd70049dd7f1da800e92ca374499ec23b4082be581d0a558beb26c573f87"} Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.776662 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v42h6"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.777101 4716 scope.go:117] "RemoveContainer" containerID="089e2e968ce107555275c5f7739c492b0a981e8a30576f2f0eb1dccf29e2e157"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.821137 4716 scope.go:117] "RemoveContainer" containerID="a9f711a43e0f1ae37491b98af162015b72d0a8ae01146c139b6f769c62fcf9e3"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.835050 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5jwpg"]
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.845887 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5jwpg"]
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.853137 4716 scope.go:117] "RemoveContainer" containerID="63be79a46108bbcee26991617a2f26a64e81eaa15ba40da0e4b588940190ff4f"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.865438 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8bgn"]
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.873919 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n8bgn"]
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.891404 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v42h6"]
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.896935 4716 scope.go:117] "RemoveContainer" containerID="a6f9fe25ab68fdc4dc52d6f71595b74db2fa6580c2868f988598e5d6f5ea3563"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.898178 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v42h6"]
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.912154 4716 scope.go:117] "RemoveContainer" containerID="638be08b34d6174c39335a4616f23edab30f637d608ad3ba0e9ccdaf302bf789"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.936921 4716 scope.go:117] "RemoveContainer" containerID="86ec669b334f1f8428099c358a5874f0c67dd1eaec4ceb5b5bd3b8f03df3cd7c"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.951804 4716 scope.go:117] "RemoveContainer" containerID="ab5651c5b058a34b3e3beb2670b610913033b142f2b0f0de6e9428d2206bdd0c"
Dec 09 15:14:04 crc kubenswrapper[4716]: I1209 15:14:04.968011 4716 scope.go:117] "RemoveContainer" containerID="eff16e0447e281aecb333aea9a281ccac85c36d6dcbf44a3c358158488481a52"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.233043 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02be1451-6780-479c-ab94-37503fea3645" path="/var/lib/kubelet/pods/02be1451-6780-479c-ab94-37503fea3645/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.234139 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" path="/var/lib/kubelet/pods/07e1c69c-8b33-4342-8632-010554dfd1d5/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.234983 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" path="/var/lib/kubelet/pods/1cf42d24-d670-433b-8a60-11e6cadde0dd/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.241432 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" path="/var/lib/kubelet/pods/1fc78b58-085f-42f7-bec5-b28d0d2bc191/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.242393 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" path="/var/lib/kubelet/pods/3f60d900-cfcc-4840-8f6b-cdcb043a510b/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.244093 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" path="/var/lib/kubelet/pods/6e8a9e79-4b63-4c3f-968c-9524e682af80/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.244929 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" path="/var/lib/kubelet/pods/85d4a173-8cf0-4f51-b713-6a8624461b61/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.245762 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" path="/var/lib/kubelet/pods/a25d26a9-7c6b-455e-9b8d-cc2fba08c576/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.247227 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" path="/var/lib/kubelet/pods/e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3/volumes"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.640830 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ktf2s"]
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641161 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641182 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641195 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641201 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641212 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641219 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641228 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641235 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641243 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641249 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641257 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641263 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641273 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641279 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641289 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641294 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641302 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641307 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641317 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641323 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641331 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641339 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641348 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641356 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641364 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641371 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641384 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641392 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641401 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641409 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641422 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641430 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641441 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641449 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641458 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641465 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641474 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641481 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641491 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641502 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641513 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641524 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641537 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641547 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641557 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641566 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641576 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641583 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="extract-content"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641595 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641603 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: E1209 15:14:05.641615 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641647 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="extract-utilities"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641769 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0f1833a-7b84-4dd1-a5cf-a1b65b7964b3" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641781 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fc78b58-085f-42f7-bec5-b28d0d2bc191" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641793 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f60d900-cfcc-4840-8f6b-cdcb043a510b" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641801 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641810 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="a25d26a9-7c6b-455e-9b8d-cc2fba08c576" containerName="marketplace-operator"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641817 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="85d4a173-8cf0-4f51-b713-6a8624461b61" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641825 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cf42d24-d670-433b-8a60-11e6cadde0dd" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641834 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e8a9e79-4b63-4c3f-968c-9524e682af80" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641841 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="02be1451-6780-479c-ab94-37503fea3645" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.641851 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="07e1c69c-8b33-4342-8632-010554dfd1d5" containerName="registry-server"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.642822 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.645884 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.664735 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktf2s"]
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.673519 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c82d22-477b-4bd0-afd3-9ae2fed959f0-utilities\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.674009 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zws4m\" (UniqueName: \"kubernetes.io/projected/47c82d22-477b-4bd0-afd3-9ae2fed959f0-kube-api-access-zws4m\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.674155 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c82d22-477b-4bd0-afd3-9ae2fed959f0-catalog-content\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.775697 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c82d22-477b-4bd0-afd3-9ae2fed959f0-utilities\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.775811 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zws4m\" (UniqueName: \"kubernetes.io/projected/47c82d22-477b-4bd0-afd3-9ae2fed959f0-kube-api-access-zws4m\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.775854 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c82d22-477b-4bd0-afd3-9ae2fed959f0-catalog-content\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.776429 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c82d22-477b-4bd0-afd3-9ae2fed959f0-utilities\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.776452 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c82d22-477b-4bd0-afd3-9ae2fed959f0-catalog-content\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.797739 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zws4m\" (UniqueName: \"kubernetes.io/projected/47c82d22-477b-4bd0-afd3-9ae2fed959f0-kube-api-access-zws4m\") pod \"redhat-marketplace-ktf2s\" (UID: \"47c82d22-477b-4bd0-afd3-9ae2fed959f0\") " pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.837401 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9z9z6"]
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.838442 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.842988 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.856739 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9z9z6"]
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.879868 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxpzk\" (UniqueName: \"kubernetes.io/projected/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-kube-api-access-zxpzk\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.880213 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-catalog-content\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.880556 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-utilities\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.960280 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktf2s"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.982500 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-catalog-content\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.982582 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-utilities\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.982653 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxpzk\" (UniqueName: \"kubernetes.io/projected/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-kube-api-access-zxpzk\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.983186 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-catalog-content\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:05 crc kubenswrapper[4716]: I1209 15:14:05.983303 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-utilities\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.003190 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxpzk\" (UniqueName: \"kubernetes.io/projected/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-kube-api-access-zxpzk\") pod \"redhat-operators-9z9z6\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.165316 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9z9z6"
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.380443 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktf2s"]
Dec 09 15:14:06 crc kubenswrapper[4716]: W1209 15:14:06.385655 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47c82d22_477b_4bd0_afd3_9ae2fed959f0.slice/crio-71c84e392c494c769129a162ae6ba0f22d6ff2a6fcdc685e08fe4393715e5f31 WatchSource:0}: Error finding container 71c84e392c494c769129a162ae6ba0f22d6ff2a6fcdc685e08fe4393715e5f31: Status 404 returned error can't find the container with id 71c84e392c494c769129a162ae6ba0f22d6ff2a6fcdc685e08fe4393715e5f31
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.610926 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9z9z6"]
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.797661 4716 generic.go:334] "Generic (PLEG): container finished" podID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerID="1dee59dc4d865801a95ef3fadf2b870d4d51760b563ddaa653f07d2141af993d" exitCode=0
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.797782 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerDied","Data":"1dee59dc4d865801a95ef3fadf2b870d4d51760b563ddaa653f07d2141af993d"}
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.797824 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerStarted","Data":"262ff02b228ff0d90442f27669a7e58fdd3c35b4f2506d68ee31e11bd419c60c"}
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.799682 4716 generic.go:334] "Generic (PLEG): container finished" podID="47c82d22-477b-4bd0-afd3-9ae2fed959f0" containerID="cd6bb6eb12e46403d776d115a57068b0e772591b52049a117feaeb6d795e7ff7" exitCode=0
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.799770 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktf2s" event={"ID":"47c82d22-477b-4bd0-afd3-9ae2fed959f0","Type":"ContainerDied","Data":"cd6bb6eb12e46403d776d115a57068b0e772591b52049a117feaeb6d795e7ff7"}
Dec 09 15:14:06 crc kubenswrapper[4716]: I1209 15:14:06.799824 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktf2s" event={"ID":"47c82d22-477b-4bd0-afd3-9ae2fed959f0","Type":"ContainerStarted","Data":"71c84e392c494c769129a162ae6ba0f22d6ff2a6fcdc685e08fe4393715e5f31"}
Dec 09 15:14:06 crc kubenswrapper[4716]: E1209 15:14:06.865375 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice/crio-c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790\": RecentStats: unable to find data in memory cache]"
Dec 09 15:14:07 crc kubenswrapper[4716]: I1209 15:14:07.824607 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerStarted","Data":"b5125c76d8b7c791be100ba5dbd812822be385265dac3d2c438b9c1da1e9371b"}
Dec 09 15:14:07 crc kubenswrapper[4716]: I1209 15:14:07.826919 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktf2s" event={"ID":"47c82d22-477b-4bd0-afd3-9ae2fed959f0","Type":"ContainerStarted","Data":"38e1e1ff19b24f6ed20f3e5fa1bb1ec3cfc4256523edcc3f6e2af103451198f0"}
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.038814 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2n29z"]
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.040329 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.042486 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.051842 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2n29z"]
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.123570 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rbs8\" (UniqueName: \"kubernetes.io/projected/5044a327-be08-455c-b84a-8d2aec4c3bb0-kube-api-access-7rbs8\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.123656 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5044a327-be08-455c-b84a-8d2aec4c3bb0-catalog-content\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.123695 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5044a327-be08-455c-b84a-8d2aec4c3bb0-utilities\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.225644 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rbs8\" (UniqueName: \"kubernetes.io/projected/5044a327-be08-455c-b84a-8d2aec4c3bb0-kube-api-access-7rbs8\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.226167 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5044a327-be08-455c-b84a-8d2aec4c3bb0-catalog-content\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.226204 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5044a327-be08-455c-b84a-8d2aec4c3bb0-utilities\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.226796 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5044a327-be08-455c-b84a-8d2aec4c3bb0-catalog-content\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.226842 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5044a327-be08-455c-b84a-8d2aec4c3bb0-utilities\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.247124 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zxsvb"]
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.248445 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.254077 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.254086 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rbs8\" (UniqueName: \"kubernetes.io/projected/5044a327-be08-455c-b84a-8d2aec4c3bb0-kube-api-access-7rbs8\") pod \"community-operators-2n29z\" (UID: \"5044a327-be08-455c-b84a-8d2aec4c3bb0\") " pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.259267 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zxsvb"]
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.328057 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-catalog-content\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.328245 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4qrf\" (UniqueName: \"kubernetes.io/projected/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-kube-api-access-r4qrf\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.328282 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-utilities\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.363098 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2n29z"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.429966 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4qrf\" (UniqueName: \"kubernetes.io/projected/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-kube-api-access-r4qrf\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.430023 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-utilities\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.430047 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-catalog-content\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.430785 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-utilities\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.431002 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-catalog-content\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.455141 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4qrf\" (UniqueName: \"kubernetes.io/projected/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-kube-api-access-r4qrf\") pod \"certified-operators-zxsvb\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.595527 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxsvb"
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.796114 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2n29z"]
Dec 09 15:14:08 crc kubenswrapper[4716]: W1209 15:14:08.808560 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5044a327_be08_455c_b84a_8d2aec4c3bb0.slice/crio-85191f7b2858ef0098c6b862ce18170f3c71e0a4ae5bb7f10f4c197367a65ae9 WatchSource:0}: Error finding container 85191f7b2858ef0098c6b862ce18170f3c71e0a4ae5bb7f10f4c197367a65ae9: Status 404 returned error can't find the container with id 85191f7b2858ef0098c6b862ce18170f3c71e0a4ae5bb7f10f4c197367a65ae9
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.838215 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2n29z" event={"ID":"5044a327-be08-455c-b84a-8d2aec4c3bb0","Type":"ContainerStarted","Data":"85191f7b2858ef0098c6b862ce18170f3c71e0a4ae5bb7f10f4c197367a65ae9"}
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.840422 4716 generic.go:334] "Generic (PLEG): container finished" podID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerID="b5125c76d8b7c791be100ba5dbd812822be385265dac3d2c438b9c1da1e9371b" exitCode=0
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.840468 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerDied","Data":"b5125c76d8b7c791be100ba5dbd812822be385265dac3d2c438b9c1da1e9371b"}
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.845170 4716 generic.go:334] "Generic (PLEG): container finished" podID="47c82d22-477b-4bd0-afd3-9ae2fed959f0" containerID="38e1e1ff19b24f6ed20f3e5fa1bb1ec3cfc4256523edcc3f6e2af103451198f0" exitCode=0
Dec 09 15:14:08 crc kubenswrapper[4716]: I1209 15:14:08.845215 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktf2s" event={"ID":"47c82d22-477b-4bd0-afd3-9ae2fed959f0","Type":"ContainerDied","Data":"38e1e1ff19b24f6ed20f3e5fa1bb1ec3cfc4256523edcc3f6e2af103451198f0"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.019509 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zxsvb"]
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.325458 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-76df65d987-xclgl"]
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.325779 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" podUID="fd87f5f0-c377-48e6-b58a-893c47cd88cb" containerName="controller-manager" containerID="cri-o://033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba" gracePeriod=30
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.334465 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"]
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.334798 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" podUID="3501701f-68b4-4e0c-8bdc-2b005b838e5c" containerName="route-controller-manager" containerID="cri-o://550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97" gracePeriod=30
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.831195 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.848730 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.851325 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-client-ca\") pod \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.851458 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-config\") pod \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.851570 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3501701f-68b4-4e0c-8bdc-2b005b838e5c-serving-cert\") pod \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.851647 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvb8s\" (UniqueName: \"kubernetes.io/projected/3501701f-68b4-4e0c-8bdc-2b005b838e5c-kube-api-access-bvb8s\") pod \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\" (UID: \"3501701f-68b4-4e0c-8bdc-2b005b838e5c\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852527 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-client-ca" (OuterVolumeSpecName: "client-ca") pod "3501701f-68b4-4e0c-8bdc-2b005b838e5c" (UID: "3501701f-68b4-4e0c-8bdc-2b005b838e5c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852563 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-config" (OuterVolumeSpecName: "config") pod "3501701f-68b4-4e0c-8bdc-2b005b838e5c" (UID: "3501701f-68b4-4e0c-8bdc-2b005b838e5c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852758 4716 generic.go:334] "Generic (PLEG): container finished" podID="3501701f-68b4-4e0c-8bdc-2b005b838e5c" containerID="550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97" exitCode=0
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852854 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" event={"ID":"3501701f-68b4-4e0c-8bdc-2b005b838e5c","Type":"ContainerDied","Data":"550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852891 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d" event={"ID":"3501701f-68b4-4e0c-8bdc-2b005b838e5c","Type":"ContainerDied","Data":"7da97f2d32b4dd950f222e9cff785357f2a99a45ae1d01a54151bc01ca483aed"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852888 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.852913 4716 scope.go:117] "RemoveContainer" containerID="550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.861800 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerStarted","Data":"a2a58763f765477117edd588ce35afb6e4fa9051ec45d86d1bc2dd658468b41a"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.862718 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3501701f-68b4-4e0c-8bdc-2b005b838e5c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3501701f-68b4-4e0c-8bdc-2b005b838e5c" (UID: "3501701f-68b4-4e0c-8bdc-2b005b838e5c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.877498 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3501701f-68b4-4e0c-8bdc-2b005b838e5c-kube-api-access-bvb8s" (OuterVolumeSpecName: "kube-api-access-bvb8s") pod "3501701f-68b4-4e0c-8bdc-2b005b838e5c" (UID: "3501701f-68b4-4e0c-8bdc-2b005b838e5c"). InnerVolumeSpecName "kube-api-access-bvb8s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.884145 4716 scope.go:117] "RemoveContainer" containerID="550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97"
Dec 09 15:14:09 crc kubenswrapper[4716]: E1209 15:14:09.885160 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97\": container with ID starting with 550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97 not found: ID does not exist" containerID="550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.885213 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97"} err="failed to get container status \"550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97\": rpc error: code = NotFound desc = could not find container \"550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97\": container with ID starting with 550e98e15d1b4c863674e43b7eda9e037cad96d385582a3583b636c4d97c7b97 not found: ID does not exist"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.886563 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktf2s" event={"ID":"47c82d22-477b-4bd0-afd3-9ae2fed959f0","Type":"ContainerStarted","Data":"feb1b1be9af19415a51cf311d7d76a5878459be6b9416a0061a68652ddd55ab9"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.889240 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerID="c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855" exitCode=0
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.889388 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxsvb" event={"ID":"ef94fdf4-dd68-4fe3-b935-b68c34d7814d","Type":"ContainerDied","Data":"c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.889421 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxsvb" event={"ID":"ef94fdf4-dd68-4fe3-b935-b68c34d7814d","Type":"ContainerStarted","Data":"014e28478f3d888dcdff9e05108ec36e5ef2eb1051a5aa1d291ced24cfb9568c"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.891939 4716 generic.go:334] "Generic (PLEG): container finished" podID="fd87f5f0-c377-48e6-b58a-893c47cd88cb" containerID="033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba" exitCode=0
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.892015 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" event={"ID":"fd87f5f0-c377-48e6-b58a-893c47cd88cb","Type":"ContainerDied","Data":"033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.892045 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl" event={"ID":"fd87f5f0-c377-48e6-b58a-893c47cd88cb","Type":"ContainerDied","Data":"3537f526db738666860d60e029d70459915a2f81dab7bad07bc1e44957b1e3a9"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.892068 4716 scope.go:117] "RemoveContainer" containerID="033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.892153 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-76df65d987-xclgl"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.894565 4716 generic.go:334] "Generic (PLEG): container finished" podID="5044a327-be08-455c-b84a-8d2aec4c3bb0" containerID="3978679cd81abddfa795a8818be439a40f6bfa50eb49d22aebfad163d2d380de" exitCode=0
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.894613 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2n29z" event={"ID":"5044a327-be08-455c-b84a-8d2aec4c3bb0","Type":"ContainerDied","Data":"3978679cd81abddfa795a8818be439a40f6bfa50eb49d22aebfad163d2d380de"}
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.909691 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9z9z6" podStartSLOduration=2.468596943 podStartE2EDuration="4.909672409s" podCreationTimestamp="2025-12-09 15:14:05 +0000 UTC" firstStartedPulling="2025-12-09 15:14:06.800662826 +0000 UTC m=+333.955406814" lastFinishedPulling="2025-12-09 15:14:09.241738292 +0000 UTC m=+336.396482280" observedRunningTime="2025-12-09 15:14:09.905408534 +0000 UTC m=+337.060152522" watchObservedRunningTime="2025-12-09 15:14:09.909672409 +0000 UTC m=+337.064416397"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.924900 4716 scope.go:117] "RemoveContainer" containerID="033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba"
Dec 09 15:14:09 crc kubenswrapper[4716]: E1209 15:14:09.925938 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba\": container with ID starting with 033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba not found: ID does not exist" containerID="033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.926009 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba"} err="failed to get container status \"033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba\": rpc error: code = NotFound desc = could not find container \"033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba\": container with ID starting with 033d548da9ce1adf788a4155f023b4644c97459a0fca2459867009c22b4671ba not found: ID does not exist"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.954151 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ktf2s" podStartSLOduration=2.515100904 podStartE2EDuration="4.954105769s" podCreationTimestamp="2025-12-09 15:14:05 +0000 UTC" firstStartedPulling="2025-12-09 15:14:06.803698055 +0000 UTC m=+333.958442043" lastFinishedPulling="2025-12-09 15:14:09.24270292 +0000 UTC m=+336.397446908" observedRunningTime="2025-12-09 15:14:09.945342671 +0000 UTC m=+337.100086679" watchObservedRunningTime="2025-12-09 15:14:09.954105769 +0000 UTC m=+337.108849757"
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.954266 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-client-ca\") pod \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.954342 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-config\") pod \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.954499 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mz9l\" (UniqueName: \"kubernetes.io/projected/fd87f5f0-c377-48e6-b58a-893c47cd88cb-kube-api-access-6mz9l\") pod \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.954551 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd87f5f0-c377-48e6-b58a-893c47cd88cb-serving-cert\") pod \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.954600 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-proxy-ca-bundles\") pod \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\" (UID: \"fd87f5f0-c377-48e6-b58a-893c47cd88cb\") "
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.955852 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-config" (OuterVolumeSpecName: "config") pod "fd87f5f0-c377-48e6-b58a-893c47cd88cb" (UID: "fd87f5f0-c377-48e6-b58a-893c47cd88cb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.956458 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-client-ca" (OuterVolumeSpecName: "client-ca") pod "fd87f5f0-c377-48e6-b58a-893c47cd88cb" (UID: "fd87f5f0-c377-48e6-b58a-893c47cd88cb"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.957173 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "fd87f5f0-c377-48e6-b58a-893c47cd88cb" (UID: "fd87f5f0-c377-48e6-b58a-893c47cd88cb"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958113 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3501701f-68b4-4e0c-8bdc-2b005b838e5c-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958154 4716 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958173 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvb8s\" (UniqueName: \"kubernetes.io/projected/3501701f-68b4-4e0c-8bdc-2b005b838e5c-kube-api-access-bvb8s\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958188 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-client-ca\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958205 4716 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-client-ca\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958218 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd87f5f0-c377-48e6-b58a-893c47cd88cb-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.958231 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3501701f-68b4-4e0c-8bdc-2b005b838e5c-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.962653 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd87f5f0-c377-48e6-b58a-893c47cd88cb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fd87f5f0-c377-48e6-b58a-893c47cd88cb" (UID: "fd87f5f0-c377-48e6-b58a-893c47cd88cb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:14:09 crc kubenswrapper[4716]: I1209 15:14:09.973612 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd87f5f0-c377-48e6-b58a-893c47cd88cb-kube-api-access-6mz9l" (OuterVolumeSpecName: "kube-api-access-6mz9l") pod "fd87f5f0-c377-48e6-b58a-893c47cd88cb" (UID: "fd87f5f0-c377-48e6-b58a-893c47cd88cb"). InnerVolumeSpecName "kube-api-access-6mz9l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.059247 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mz9l\" (UniqueName: \"kubernetes.io/projected/fd87f5f0-c377-48e6-b58a-893c47cd88cb-kube-api-access-6mz9l\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.059295 4716 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd87f5f0-c377-48e6-b58a-893c47cd88cb-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.189105 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.194318 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-775b55bb67-rf79d"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.226692 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-76df65d987-xclgl"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.229921 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-76df65d987-xclgl"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.534274 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"]
Dec 09 15:14:10 crc kubenswrapper[4716]: E1209 15:14:10.535030 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd87f5f0-c377-48e6-b58a-893c47cd88cb" containerName="controller-manager"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.535057 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd87f5f0-c377-48e6-b58a-893c47cd88cb" containerName="controller-manager"
Dec 09 15:14:10 crc kubenswrapper[4716]: E1209 15:14:10.535076 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3501701f-68b4-4e0c-8bdc-2b005b838e5c" containerName="route-controller-manager"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.535083 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3501701f-68b4-4e0c-8bdc-2b005b838e5c" containerName="route-controller-manager"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.535198 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3501701f-68b4-4e0c-8bdc-2b005b838e5c" containerName="route-controller-manager"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.535216 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd87f5f0-c377-48e6-b58a-893c47cd88cb" containerName="controller-manager"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.535758 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.538829 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.539028 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.539175 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.541233 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.542256 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.542955 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.543061 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.545504 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.546076 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.550730 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.551011 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.551159 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.551298 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.551310 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.555541 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.559385 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.563100 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"]
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.566250 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9s2k\" (UniqueName: \"kubernetes.io/projected/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-kube-api-access-p9s2k\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.566305 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-config\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.566354 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-client-ca\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.566376 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-serving-cert\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.566404 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-proxy-ca-bundles\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.667497 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-serving-cert\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.667548 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-proxy-ca-bundles\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.667938 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3db2b126-c74c-4b52-85ac-19606f3c6ea5-config\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.668006 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9s2k\" (UniqueName: \"kubernetes.io/projected/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-kube-api-access-p9s2k\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.668033 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-config\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.668089 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3db2b126-c74c-4b52-85ac-19606f3c6ea5-client-ca\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.668115 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3db2b126-c74c-4b52-85ac-19606f3c6ea5-serving-cert\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.668143 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld5zc\" (UniqueName: \"kubernetes.io/projected/3db2b126-c74c-4b52-85ac-19606f3c6ea5-kube-api-access-ld5zc\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.668172 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-client-ca\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.669235 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-proxy-ca-bundles\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.669377 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-client-ca\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"
Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.669458 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-config\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") "
pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.682959 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-serving-cert\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.689441 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9s2k\" (UniqueName: \"kubernetes.io/projected/a159c3ee-5dbf-4e2c-8394-b9c8577e855a-kube-api-access-p9s2k\") pod \"controller-manager-6bb9db44d5-4c8x2\" (UID: \"a159c3ee-5dbf-4e2c-8394-b9c8577e855a\") " pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.769302 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3db2b126-c74c-4b52-85ac-19606f3c6ea5-client-ca\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.769378 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3db2b126-c74c-4b52-85ac-19606f3c6ea5-serving-cert\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.769418 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld5zc\" (UniqueName: \"kubernetes.io/projected/3db2b126-c74c-4b52-85ac-19606f3c6ea5-kube-api-access-ld5zc\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.769461 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3db2b126-c74c-4b52-85ac-19606f3c6ea5-config\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.770371 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3db2b126-c74c-4b52-85ac-19606f3c6ea5-client-ca\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.770669 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3db2b126-c74c-4b52-85ac-19606f3c6ea5-config\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.774219 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3db2b126-c74c-4b52-85ac-19606f3c6ea5-serving-cert\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.796681 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld5zc\" (UniqueName: \"kubernetes.io/projected/3db2b126-c74c-4b52-85ac-19606f3c6ea5-kube-api-access-ld5zc\") pod \"route-controller-manager-64f96cc955-5ncj5\" (UID: \"3db2b126-c74c-4b52-85ac-19606f3c6ea5\") " pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.868755 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.882984 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.956407 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerID="f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae" exitCode=0 Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.956607 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxsvb" event={"ID":"ef94fdf4-dd68-4fe3-b935-b68c34d7814d","Type":"ContainerDied","Data":"f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae"} Dec 09 15:14:10 crc kubenswrapper[4716]: I1209 15:14:10.961723 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2n29z" event={"ID":"5044a327-be08-455c-b84a-8d2aec4c3bb0","Type":"ContainerStarted","Data":"02052d50fcab7c198d058883948a5bea7ffb4a8c240e7a9bd4de2477105585e3"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.237483 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3501701f-68b4-4e0c-8bdc-2b005b838e5c" path="/var/lib/kubelet/pods/3501701f-68b4-4e0c-8bdc-2b005b838e5c/volumes" Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.238532 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd87f5f0-c377-48e6-b58a-893c47cd88cb" path="/var/lib/kubelet/pods/fd87f5f0-c377-48e6-b58a-893c47cd88cb/volumes" Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.387491 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2"] Dec 09 15:14:11 crc kubenswrapper[4716]: W1209 15:14:11.392139 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda159c3ee_5dbf_4e2c_8394_b9c8577e855a.slice/crio-de5d46ec2edcf788b94c780369cb57d0b09147a76aa844755ad51db32a42347e WatchSource:0}: Error finding container de5d46ec2edcf788b94c780369cb57d0b09147a76aa844755ad51db32a42347e: Status 404 returned error can't find the container with id de5d46ec2edcf788b94c780369cb57d0b09147a76aa844755ad51db32a42347e Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.448935 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5"] Dec 09 15:14:11 crc kubenswrapper[4716]: W1209 15:14:11.460138 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3db2b126_c74c_4b52_85ac_19606f3c6ea5.slice/crio-efeb4b5e811ec12004c6fbf948fcc76042fd5619b72e1e076456d766c47791dd WatchSource:0}: Error finding container efeb4b5e811ec12004c6fbf948fcc76042fd5619b72e1e076456d766c47791dd: Status 404 returned error can't find the container with id efeb4b5e811ec12004c6fbf948fcc76042fd5619b72e1e076456d766c47791dd Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.990484 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxsvb" event={"ID":"ef94fdf4-dd68-4fe3-b935-b68c34d7814d","Type":"ContainerStarted","Data":"aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.992455 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" event={"ID":"a159c3ee-5dbf-4e2c-8394-b9c8577e855a","Type":"ContainerStarted","Data":"060cc33c40e056dcaf0569ddf88cd5d7449e84aa4f3d4dcd7b1ec09addbfd54e"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.992564 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.992578 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" event={"ID":"a159c3ee-5dbf-4e2c-8394-b9c8577e855a","Type":"ContainerStarted","Data":"de5d46ec2edcf788b94c780369cb57d0b09147a76aa844755ad51db32a42347e"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.994988 4716 generic.go:334] "Generic (PLEG): container finished" podID="5044a327-be08-455c-b84a-8d2aec4c3bb0" containerID="02052d50fcab7c198d058883948a5bea7ffb4a8c240e7a9bd4de2477105585e3" exitCode=0 Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.995090 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2n29z" event={"ID":"5044a327-be08-455c-b84a-8d2aec4c3bb0","Type":"ContainerDied","Data":"02052d50fcab7c198d058883948a5bea7ffb4a8c240e7a9bd4de2477105585e3"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.995133 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2n29z" event={"ID":"5044a327-be08-455c-b84a-8d2aec4c3bb0","Type":"ContainerStarted","Data":"5d7705d112e422237ed3847ca7979fe5316bcf5bf98605c751281a54cfba589e"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.997834 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" event={"ID":"3db2b126-c74c-4b52-85ac-19606f3c6ea5","Type":"ContainerStarted","Data":"a756fa3b9de34381ae860fb572583b9b9e06bbad3b770038539d9bc5dce9142b"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.997864 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" event={"ID":"3db2b126-c74c-4b52-85ac-19606f3c6ea5","Type":"ContainerStarted","Data":"efeb4b5e811ec12004c6fbf948fcc76042fd5619b72e1e076456d766c47791dd"} Dec 09 15:14:11 crc kubenswrapper[4716]: I1209 15:14:11.998275 4716 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.018987 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.034177 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zxsvb" podStartSLOduration=2.561286058 podStartE2EDuration="4.034153863s" podCreationTimestamp="2025-12-09 15:14:08 +0000 UTC" firstStartedPulling="2025-12-09 15:14:09.891082362 +0000 UTC m=+337.045826350" lastFinishedPulling="2025-12-09 15:14:11.363950167 +0000 UTC m=+338.518694155" observedRunningTime="2025-12-09 15:14:12.015014529 +0000 UTC m=+339.169758517" watchObservedRunningTime="2025-12-09 15:14:12.034153863 +0000 UTC m=+339.188897851" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.034739 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6bb9db44d5-4c8x2" podStartSLOduration=3.03473258 podStartE2EDuration="3.03473258s" podCreationTimestamp="2025-12-09 15:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:14:12.030092833 +0000 UTC m=+339.184836821" watchObservedRunningTime="2025-12-09 15:14:12.03473258 +0000 UTC m=+339.189476568" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.063997 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2n29z" podStartSLOduration=2.569681255 podStartE2EDuration="4.063964862s" podCreationTimestamp="2025-12-09 15:14:08 +0000 UTC" firstStartedPulling="2025-12-09 15:14:09.8981428 +0000 UTC m=+337.052886788" lastFinishedPulling="2025-12-09 15:14:11.392426407 +0000 UTC m=+338.547170395" observedRunningTime="2025-12-09 15:14:12.061821078 +0000 UTC m=+339.216565076" watchObservedRunningTime="2025-12-09 15:14:12.063964862 +0000 UTC m=+339.218708850" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.089969 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" podStartSLOduration=3.089948848 podStartE2EDuration="3.089948848s" podCreationTimestamp="2025-12-09 15:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:14:12.085023052 +0000 UTC m=+339.239767040" watchObservedRunningTime="2025-12-09 15:14:12.089948848 +0000 UTC m=+339.244692836" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.108568 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-64f96cc955-5ncj5" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.856102 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-hgf8l" Dec 09 15:14:12 crc kubenswrapper[4716]: I1209 15:14:12.923372 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ht662"] Dec 09 15:14:15 crc kubenswrapper[4716]: I1209 15:14:15.961187 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-ktf2s" Dec 09 15:14:15 crc kubenswrapper[4716]: I1209 15:14:15.962042 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ktf2s" Dec 09 15:14:16 crc kubenswrapper[4716]: I1209 15:14:16.013775 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ktf2s" Dec 09 15:14:16 crc kubenswrapper[4716]: I1209 15:14:16.071101 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ktf2s" Dec 09 15:14:16 crc kubenswrapper[4716]: I1209 15:14:16.165594 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9z9z6" Dec 09 15:14:16 crc kubenswrapper[4716]: I1209 15:14:16.165687 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9z9z6" Dec 09 15:14:16 crc kubenswrapper[4716]: I1209 15:14:16.211447 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9z9z6" Dec 09 15:14:17 crc kubenswrapper[4716]: E1209 15:14:17.001959 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice/crio-c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790\": RecentStats: unable to find data in memory cache]" Dec 09 15:14:17 crc kubenswrapper[4716]: I1209 15:14:17.084655 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9z9z6" Dec 09 15:14:18 crc kubenswrapper[4716]: I1209 15:14:18.363503 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2n29z" Dec 09 15:14:18 crc kubenswrapper[4716]: I1209 15:14:18.364055 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2n29z" Dec 09 15:14:18 crc kubenswrapper[4716]: I1209 15:14:18.410462 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2n29z" Dec 09 15:14:18 crc kubenswrapper[4716]: I1209 15:14:18.596000 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zxsvb" Dec 09 15:14:18 crc kubenswrapper[4716]: I1209 15:14:18.596082 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zxsvb" Dec 09 15:14:18 crc kubenswrapper[4716]: I1209 15:14:18.641653 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zxsvb" Dec 09 15:14:19 crc kubenswrapper[4716]: I1209 15:14:19.111639 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2n29z" Dec 09 15:14:19 crc kubenswrapper[4716]: I1209 15:14:19.122539 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zxsvb" Dec 09 15:14:27 crc kubenswrapper[4716]: E1209 15:14:27.126056 4716 
cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd90937ae_2446_41f4_94eb_e928c5d449de.slice/crio-c4df3decba4cf0113c63157c9570993add444413bb09039c1c08e79e11969790\": RecentStats: unable to find data in memory cache]" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.073159 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2"] Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.097223 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.105994 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.106129 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.106285 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.106552 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.106727 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.108039 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2"] Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.293962 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/8030b2dd-dc40-4432-843c-8819f15562f9-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.294826 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkqwh\" (UniqueName: \"kubernetes.io/projected/8030b2dd-dc40-4432-843c-8819f15562f9-kube-api-access-rkqwh\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.294971 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/8030b2dd-dc40-4432-843c-8819f15562f9-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.395579 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/8030b2dd-dc40-4432-843c-8819f15562f9-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.396195 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkqwh\" (UniqueName: \"kubernetes.io/projected/8030b2dd-dc40-4432-843c-8819f15562f9-kube-api-access-rkqwh\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.396352 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/8030b2dd-dc40-4432-843c-8819f15562f9-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.397993 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/8030b2dd-dc40-4432-843c-8819f15562f9-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.405576 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/8030b2dd-dc40-4432-843c-8819f15562f9-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.415459 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkqwh\" (UniqueName: \"kubernetes.io/projected/8030b2dd-dc40-4432-843c-8819f15562f9-kube-api-access-rkqwh\") pod \"cluster-monitoring-operator-6d5b84845-mltz2\" (UID: \"8030b2dd-dc40-4432-843c-8819f15562f9\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.420935 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" Dec 09 15:14:35 crc kubenswrapper[4716]: I1209 15:14:35.826332 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2"] Dec 09 15:14:36 crc kubenswrapper[4716]: I1209 15:14:36.164888 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" event={"ID":"8030b2dd-dc40-4432-843c-8819f15562f9","Type":"ContainerStarted","Data":"5c4e403021b8bd82a7fa84369095effb0df9a7a482ea6261deace083cc4691b5"} Dec 09 15:14:37 crc kubenswrapper[4716]: I1209 15:14:37.983059 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" podUID="a9146318-442c-453e-977e-802cdaa5532a" containerName="registry" containerID="cri-o://a5887a2be70abfd66b398d4e97f01df280df498b37af2a3e030796b376e2a4fb" gracePeriod=30 Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.102420 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z"] Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.103637 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.113888 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-2v8tr" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.113936 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.126028 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z"] Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.139765 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/921a9f99-8a08-4f4b-8c55-6221e69d5356-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-kzp7z\" (UID: \"921a9f99-8a08-4f4b-8c55-6221e69d5356\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.182586 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" event={"ID":"8030b2dd-dc40-4432-843c-8819f15562f9","Type":"ContainerStarted","Data":"3b0ca7a46d76793a3cd9a7c177870b50388446c8ce1f6302d7478159f081d78d"} Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.184202 4716 generic.go:334] "Generic (PLEG): container finished" podID="a9146318-442c-453e-977e-802cdaa5532a" containerID="a5887a2be70abfd66b398d4e97f01df280df498b37af2a3e030796b376e2a4fb" exitCode=0 Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.184238 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" event={"ID":"a9146318-442c-453e-977e-802cdaa5532a","Type":"ContainerDied","Data":"a5887a2be70abfd66b398d4e97f01df280df498b37af2a3e030796b376e2a4fb"} Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.204928 4716 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-mltz2" podStartSLOduration=1.5514539489999999 podStartE2EDuration="3.204895367s" podCreationTimestamp="2025-12-09 15:14:35 +0000 UTC" firstStartedPulling="2025-12-09 15:14:35.8383859 +0000 UTC m=+362.993129888" lastFinishedPulling="2025-12-09 15:14:37.491827318 +0000 UTC m=+364.646571306" observedRunningTime="2025-12-09 15:14:38.199306943 +0000 UTC m=+365.354050931" watchObservedRunningTime="2025-12-09 15:14:38.204895367 +0000 UTC m=+365.359639355" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.240898 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/921a9f99-8a08-4f4b-8c55-6221e69d5356-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-kzp7z\" (UID: \"921a9f99-8a08-4f4b-8c55-6221e69d5356\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:38 crc kubenswrapper[4716]: E1209 15:14:38.241075 4716 secret.go:188] Couldn't get secret openshift-monitoring/prometheus-operator-admission-webhook-tls: secret "prometheus-operator-admission-webhook-tls" not found Dec 09 15:14:38 crc kubenswrapper[4716]: E1209 15:14:38.241149 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/921a9f99-8a08-4f4b-8c55-6221e69d5356-tls-certificates podName:921a9f99-8a08-4f4b-8c55-6221e69d5356 nodeName:}" failed. No retries permitted until 2025-12-09 15:14:38.741124235 +0000 UTC m=+365.895868223 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-certificates" (UniqueName: "kubernetes.io/secret/921a9f99-8a08-4f4b-8c55-6221e69d5356-tls-certificates") pod "prometheus-operator-admission-webhook-f54c54754-kzp7z" (UID: "921a9f99-8a08-4f4b-8c55-6221e69d5356") : secret "prometheus-operator-admission-webhook-tls" not found Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.475094 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.649926 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-registry-tls\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.649993 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr87p\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-kube-api-access-xr87p\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.650036 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-registry-certificates\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.650134 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a9146318-442c-453e-977e-802cdaa5532a-ca-trust-extracted\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.650333 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.650354 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-bound-sa-token\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.650386 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-trusted-ca\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.650422 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a9146318-442c-453e-977e-802cdaa5532a-installation-pull-secrets\") pod \"a9146318-442c-453e-977e-802cdaa5532a\" (UID: \"a9146318-442c-453e-977e-802cdaa5532a\") " Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.651519 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.651713 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.657670 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-kube-api-access-xr87p" (OuterVolumeSpecName: "kube-api-access-xr87p") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "kube-api-access-xr87p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.658443 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.658685 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9146318-442c-453e-977e-802cdaa5532a-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.659026 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.670057 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.674561 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9146318-442c-453e-977e-802cdaa5532a-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "a9146318-442c-453e-977e-802cdaa5532a" (UID: "a9146318-442c-453e-977e-802cdaa5532a"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.752559 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/921a9f99-8a08-4f4b-8c55-6221e69d5356-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-kzp7z\" (UID: \"921a9f99-8a08-4f4b-8c55-6221e69d5356\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753138 4716 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753165 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr87p\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-kube-api-access-xr87p\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753182 4716 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753195 4716 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a9146318-442c-453e-977e-802cdaa5532a-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753209 4716 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a9146318-442c-453e-977e-802cdaa5532a-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753222 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a9146318-442c-453e-977e-802cdaa5532a-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.753236 4716 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a9146318-442c-453e-977e-802cdaa5532a-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 09 15:14:38 crc kubenswrapper[4716]: I1209 15:14:38.763992 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/921a9f99-8a08-4f4b-8c55-6221e69d5356-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-kzp7z\" (UID: \"921a9f99-8a08-4f4b-8c55-6221e69d5356\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.027549 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.193973 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" event={"ID":"a9146318-442c-453e-977e-802cdaa5532a","Type":"ContainerDied","Data":"e940a4a14b56488d6124ffcd63397aabbbbc12d673621815919d2bb173d46f94"} Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.194013 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ht662" Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.194056 4716 scope.go:117] "RemoveContainer" containerID="a5887a2be70abfd66b398d4e97f01df280df498b37af2a3e030796b376e2a4fb" Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.233447 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ht662"] Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.235016 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ht662"] Dec 09 15:14:39 crc kubenswrapper[4716]: I1209 15:14:39.588375 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z"] Dec 09 15:14:39 crc kubenswrapper[4716]: W1209 15:14:39.591977 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod921a9f99_8a08_4f4b_8c55_6221e69d5356.slice/crio-86154a318a790cdfa5abddba8408332620cd34e2ae793fdaeb00fb450a248f68 WatchSource:0}: Error finding container 86154a318a790cdfa5abddba8408332620cd34e2ae793fdaeb00fb450a248f68: Status 404 returned error can't find the container with id 86154a318a790cdfa5abddba8408332620cd34e2ae793fdaeb00fb450a248f68 Dec 09 15:14:40 crc kubenswrapper[4716]: I1209 15:14:40.203110 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" event={"ID":"921a9f99-8a08-4f4b-8c55-6221e69d5356","Type":"ContainerStarted","Data":"86154a318a790cdfa5abddba8408332620cd34e2ae793fdaeb00fb450a248f68"} Dec 09 15:14:41 crc kubenswrapper[4716]: I1209 15:14:41.228090 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9146318-442c-453e-977e-802cdaa5532a" path="/var/lib/kubelet/pods/a9146318-442c-453e-977e-802cdaa5532a/volumes" Dec 09 15:14:41 crc kubenswrapper[4716]: I1209 15:14:41.229428 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" event={"ID":"921a9f99-8a08-4f4b-8c55-6221e69d5356","Type":"ContainerStarted","Data":"b9331d07d23871c55a9ea30653a9c6c57855f7e7d9afcf9baf76255dcacffe5a"} Dec 09 15:14:41 crc kubenswrapper[4716]: I1209 15:14:41.229476 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:41 crc kubenswrapper[4716]: I1209 15:14:41.231331 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" Dec 09 15:14:41 crc kubenswrapper[4716]: I1209 15:14:41.242271 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" podStartSLOduration=1.8819986819999999 podStartE2EDuration="3.242243477s" podCreationTimestamp="2025-12-09 15:14:38 +0000 UTC" firstStartedPulling="2025-12-09 15:14:39.59457686 +0000 UTC m=+366.749320838" lastFinishedPulling="2025-12-09 15:14:40.954821645 +0000 UTC m=+368.109565633" observedRunningTime="2025-12-09 15:14:41.239562188 +0000 UTC m=+368.394306186" watchObservedRunningTime="2025-12-09 15:14:41.242243477 +0000 UTC m=+368.396987485" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.160329 4716 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-monitoring/prometheus-operator-db54df47d-r6w7n"] Dec 09 15:14:42 crc kubenswrapper[4716]: E1209 15:14:42.160616 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9146318-442c-453e-977e-802cdaa5532a" containerName="registry" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.160668 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9146318-442c-453e-977e-802cdaa5532a" containerName="registry" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.160798 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9146318-442c-453e-977e-802cdaa5532a" containerName="registry" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.161495 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.165033 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.165266 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-dzctn" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.165406 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.171919 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.172316 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-r6w7n"] Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.307152 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6w75\" (UniqueName: \"kubernetes.io/projected/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-kube-api-access-p6w75\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.308006 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-metrics-client-ca\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.308236 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.308398 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " 
pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.409058 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.409161 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6w75\" (UniqueName: \"kubernetes.io/projected/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-kube-api-access-p6w75\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.409186 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-metrics-client-ca\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.409206 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.410582 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-metrics-client-ca\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.418439 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.418557 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.428407 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6w75\" (UniqueName: \"kubernetes.io/projected/214a97f5-9e95-47ab-ba2b-b7c6169e93eb-kube-api-access-p6w75\") pod \"prometheus-operator-db54df47d-r6w7n\" (UID: \"214a97f5-9e95-47ab-ba2b-b7c6169e93eb\") " pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc 
kubenswrapper[4716]: I1209 15:14:42.481425 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" Dec 09 15:14:42 crc kubenswrapper[4716]: I1209 15:14:42.899555 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-r6w7n"] Dec 09 15:14:42 crc kubenswrapper[4716]: W1209 15:14:42.905304 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod214a97f5_9e95_47ab_ba2b_b7c6169e93eb.slice/crio-2280bf58aa028d37d87ce5631b161ba059b3cbf67e1d41fb86bb63cc2143be12 WatchSource:0}: Error finding container 2280bf58aa028d37d87ce5631b161ba059b3cbf67e1d41fb86bb63cc2143be12: Status 404 returned error can't find the container with id 2280bf58aa028d37d87ce5631b161ba059b3cbf67e1d41fb86bb63cc2143be12 Dec 09 15:14:43 crc kubenswrapper[4716]: I1209 15:14:43.239936 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" event={"ID":"214a97f5-9e95-47ab-ba2b-b7c6169e93eb","Type":"ContainerStarted","Data":"2280bf58aa028d37d87ce5631b161ba059b3cbf67e1d41fb86bb63cc2143be12"} Dec 09 15:14:45 crc kubenswrapper[4716]: I1209 15:14:45.256423 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" event={"ID":"214a97f5-9e95-47ab-ba2b-b7c6169e93eb","Type":"ContainerStarted","Data":"b42263f31ff733a65a48a51c5eed3d90271d9a68c50a15623a5155f4f99016cb"} Dec 09 15:14:45 crc kubenswrapper[4716]: I1209 15:14:45.257079 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" event={"ID":"214a97f5-9e95-47ab-ba2b-b7c6169e93eb","Type":"ContainerStarted","Data":"c5d913cdcf3068df250d5e33f8b8a35185e4b8de47a166086c5a9a61185fd3de"} Dec 09 15:14:45 crc kubenswrapper[4716]: I1209 15:14:45.277554 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-r6w7n" podStartSLOduration=1.451445257 podStartE2EDuration="3.277530134s" podCreationTimestamp="2025-12-09 15:14:42 +0000 UTC" firstStartedPulling="2025-12-09 15:14:42.908337118 +0000 UTC m=+370.063081106" lastFinishedPulling="2025-12-09 15:14:44.734421995 +0000 UTC m=+371.889165983" observedRunningTime="2025-12-09 15:14:45.275361921 +0000 UTC m=+372.430105929" watchObservedRunningTime="2025-12-09 15:14:45.277530134 +0000 UTC m=+372.432274122" Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.538437 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"] Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.540501 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r" Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.542985 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"] Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.544987 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.546273 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-7x5wp"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.546464 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.548100 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.548109 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.548197 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-gpz9z"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.549032 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.550419 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.560719 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"]
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.600487 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"]
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.617251 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-s6sf4"]
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.618392 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.621342 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.621363 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.621406 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-rlt75"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.698815 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.698882 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.698944 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5cc5cfca-7c35-49ed-9b4a-d4416082b264-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699082 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-sys\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699128 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-tls\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699161 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/5cc5cfca-7c35-49ed-9b4a-d4416082b264-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699189 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-root\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699231 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f42q7\" (UniqueName: \"kubernetes.io/projected/5cc5cfca-7c35-49ed-9b4a-d4416082b264-kube-api-access-f42q7\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699281 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-textfile\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699327 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ebb184a7-5322-40ab-8636-4b1d78882422-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699363 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2vcd\" (UniqueName: \"kubernetes.io/projected/ebb184a7-5322-40ab-8636-4b1d78882422-kube-api-access-m2vcd\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699388 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/ebb184a7-5322-40ab-8636-4b1d78882422-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699450 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699532 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5cc5cfca-7c35-49ed-9b4a-d4416082b264-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699567 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699601 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-wtmp\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699693 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98m4l\" (UniqueName: \"kubernetes.io/projected/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-kube-api-access-98m4l\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.699824 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-metrics-client-ca\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.801816 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5cc5cfca-7c35-49ed-9b4a-d4416082b264-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.801895 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-sys\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.801922 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/5cc5cfca-7c35-49ed-9b4a-d4416082b264-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.801942 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-root\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.801960 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-tls\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.801986 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f42q7\" (UniqueName: \"kubernetes.io/projected/5cc5cfca-7c35-49ed-9b4a-d4416082b264-kube-api-access-f42q7\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802003 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-textfile\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802031 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ebb184a7-5322-40ab-8636-4b1d78882422-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802053 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2vcd\" (UniqueName: \"kubernetes.io/projected/ebb184a7-5322-40ab-8636-4b1d78882422-kube-api-access-m2vcd\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802065 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-root\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802122 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-sys\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802165 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/ebb184a7-5322-40ab-8636-4b1d78882422-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802462 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802595 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5cc5cfca-7c35-49ed-9b4a-d4416082b264-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802665 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802713 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-wtmp\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.802757 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98m4l\" (UniqueName: \"kubernetes.io/projected/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-kube-api-access-98m4l\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.803014 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-textfile\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.803034 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-wtmp\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.803463 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-metrics-client-ca\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.803556 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.803607 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.803938 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ebb184a7-5322-40ab-8636-4b1d78882422-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.804297 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-metrics-client-ca\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.804423 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/5cc5cfca-7c35-49ed-9b4a-d4416082b264-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.804530 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.829093 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/ebb184a7-5322-40ab-8636-4b1d78882422-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.830081 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.831636 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/5cc5cfca-7c35-49ed-9b4a-d4416082b264-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.832232 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.843451 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-node-exporter-tls\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.843550 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/5cc5cfca-7c35-49ed-9b4a-d4416082b264-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.843595 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ebb184a7-5322-40ab-8636-4b1d78882422-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.863480 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2vcd\" (UniqueName: \"kubernetes.io/projected/ebb184a7-5322-40ab-8636-4b1d78882422-kube-api-access-m2vcd\") pod \"kube-state-metrics-777cb5bd5d-hk6h5\" (UID: \"ebb184a7-5322-40ab-8636-4b1d78882422\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.871879 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98m4l\" (UniqueName: \"kubernetes.io/projected/0c47b1a7-bbe0-4381-9276-58fa98d7f2bc-kube-api-access-98m4l\") pod \"node-exporter-s6sf4\" (UID: \"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc\") " pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.878929 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f42q7\" (UniqueName: \"kubernetes.io/projected/5cc5cfca-7c35-49ed-9b4a-d4416082b264-kube-api-access-f42q7\") pod \"openshift-state-metrics-566fddb674-6dz7r\" (UID: \"5cc5cfca-7c35-49ed-9b4a-d4416082b264\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.904847 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.921960 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.922347 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.922679 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"
Dec 09 15:14:47 crc kubenswrapper[4716]: I1209 15:14:47.932588 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-s6sf4"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.283424 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5"]
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.288119 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-s6sf4" event={"ID":"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc","Type":"ContainerStarted","Data":"da6eed23b59d34d619e719d4c585b000a8efc6dbe943c8681ecae3a60f40958e"}
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.402587 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r"]
Dec 09 15:14:48 crc kubenswrapper[4716]: W1209 15:14:48.410667 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cc5cfca_7c35_49ed_9b4a_d4416082b264.slice/crio-4764737dd071de2fb3ef2d72fd3ef69b88abac9bb5435679590edccc2e93efb4 WatchSource:0}: Error finding container 4764737dd071de2fb3ef2d72fd3ef69b88abac9bb5435679590edccc2e93efb4: Status 404 returned error can't find the container with id 4764737dd071de2fb3ef2d72fd3ef69b88abac9bb5435679590edccc2e93efb4
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.772389 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"]
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.775170 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.779696 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.779751 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-xtstv"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.779957 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.780087 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.780163 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.780188 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.784140 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.784197 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.791221 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.798747 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"]
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.847942 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/65f1aaad-6876-4c62-8969-dddcc323b532-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.847999 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-config-volume\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848048 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848104 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848124 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/65f1aaad-6876-4c62-8969-dddcc323b532-config-out\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848150 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/65f1aaad-6876-4c62-8969-dddcc323b532-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848184 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848319 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxksm\" (UniqueName: \"kubernetes.io/projected/65f1aaad-6876-4c62-8969-dddcc323b532-kube-api-access-qxksm\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848353 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65f1aaad-6876-4c62-8969-dddcc323b532-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848390 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/65f1aaad-6876-4c62-8969-dddcc323b532-tls-assets\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848414 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.848444 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-web-config\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951250 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/65f1aaad-6876-4c62-8969-dddcc323b532-tls-assets\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951319 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951367 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-web-config\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951419 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/65f1aaad-6876-4c62-8969-dddcc323b532-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951450 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-config-volume\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951480 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951521 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951545 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/65f1aaad-6876-4c62-8969-dddcc323b532-config-out\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951570 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/65f1aaad-6876-4c62-8969-dddcc323b532-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951602 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951710 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxksm\" (UniqueName: \"kubernetes.io/projected/65f1aaad-6876-4c62-8969-dddcc323b532-kube-api-access-qxksm\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.951742 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65f1aaad-6876-4c62-8969-dddcc323b532-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.952571 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/65f1aaad-6876-4c62-8969-dddcc323b532-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.953176 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65f1aaad-6876-4c62-8969-dddcc323b532-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.954105 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/65f1aaad-6876-4c62-8969-dddcc323b532-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.962005 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/65f1aaad-6876-4c62-8969-dddcc323b532-config-out\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.962421 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.962634 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.963721 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-web-config\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.964747 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.965233 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-config-volume\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.969847 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/65f1aaad-6876-4c62-8969-dddcc323b532-tls-assets\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.969874 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/65f1aaad-6876-4c62-8969-dddcc323b532-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:48 crc kubenswrapper[4716]: I1209 15:14:48.978424 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxksm\" (UniqueName: \"kubernetes.io/projected/65f1aaad-6876-4c62-8969-dddcc323b532-kube-api-access-qxksm\") pod \"alertmanager-main-0\" (UID: \"65f1aaad-6876-4c62-8969-dddcc323b532\") " pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.148473 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0"
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.298048 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5" event={"ID":"ebb184a7-5322-40ab-8636-4b1d78882422","Type":"ContainerStarted","Data":"fb77ac2a02e10fac4bdba0c7b347c18fc4b67c9690a26937f56fc9c4931d1ac8"}
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.302090 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r" event={"ID":"5cc5cfca-7c35-49ed-9b4a-d4416082b264","Type":"ContainerStarted","Data":"6e78343ee3bd7818423dd4cf783e8571066c7f34e95a4f8dbc91fc7b5f0216d5"}
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.302168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r" event={"ID":"5cc5cfca-7c35-49ed-9b4a-d4416082b264","Type":"ContainerStarted","Data":"55bacc6396d55c60a1cd19891f5e1e61119fe199bc7e66cfe87fb5e592dbfd71"}
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.302184 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r" event={"ID":"5cc5cfca-7c35-49ed-9b4a-d4416082b264","Type":"ContainerStarted","Data":"4764737dd071de2fb3ef2d72fd3ef69b88abac9bb5435679590edccc2e93efb4"}
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.610147 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-7566f64cd7-tmljw"]
Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.611912 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw"
Need to start a new one" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.614837 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.615161 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-wlv27" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.615371 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.615512 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-fgpv8l055uifk" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.615663 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.615856 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.616016 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.626252 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-7566f64cd7-tmljw"] Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678335 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678401 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-grpc-tls\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678545 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-tls\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678605 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678759 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/e18650db-9d90-47d6-a850-70643a99837a-metrics-client-ca\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678835 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678876 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp69g\" (UniqueName: \"kubernetes.io/projected/e18650db-9d90-47d6-a850-70643a99837a-kube-api-access-kp69g\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.678929 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.780400 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.780464 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-grpc-tls\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.780528 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-tls\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.781658 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.781703 4716 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/e18650db-9d90-47d6-a850-70643a99837a-metrics-client-ca\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.781772 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.781835 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp69g\" (UniqueName: \"kubernetes.io/projected/e18650db-9d90-47d6-a850-70643a99837a-kube-api-access-kp69g\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.781875 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.783065 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/e18650db-9d90-47d6-a850-70643a99837a-metrics-client-ca\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.785974 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.786279 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-tls\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.786907 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-grpc-tls\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.787598 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: 
\"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.789002 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.790505 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/e18650db-9d90-47d6-a850-70643a99837a-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.800951 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp69g\" (UniqueName: \"kubernetes.io/projected/e18650db-9d90-47d6-a850-70643a99837a-kube-api-access-kp69g\") pod \"thanos-querier-7566f64cd7-tmljw\" (UID: \"e18650db-9d90-47d6-a850-70643a99837a\") " pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:49 crc kubenswrapper[4716]: I1209 15:14:49.933129 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:50 crc kubenswrapper[4716]: I1209 15:14:50.351277 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5" event={"ID":"ebb184a7-5322-40ab-8636-4b1d78882422","Type":"ContainerStarted","Data":"5cace3c1c530f66c9fa5a96665a20604b10d0fa8061daf72d1e64bb7bda0d744"} Dec 09 15:14:50 crc kubenswrapper[4716]: I1209 15:14:50.361328 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-s6sf4" event={"ID":"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc","Type":"ContainerStarted","Data":"9bdba60400c5391eae4474dac20bf37ac36bad48d1daf08251130f9f4138864e"} Dec 09 15:14:50 crc kubenswrapper[4716]: I1209 15:14:50.377085 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Dec 09 15:14:50 crc kubenswrapper[4716]: W1209 15:14:50.398099 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65f1aaad_6876_4c62_8969_dddcc323b532.slice/crio-8c97697cc418655fecebbb5209232be366b84deb67f1f6678fed924d6c03283d WatchSource:0}: Error finding container 8c97697cc418655fecebbb5209232be366b84deb67f1f6678fed924d6c03283d: Status 404 returned error can't find the container with id 8c97697cc418655fecebbb5209232be366b84deb67f1f6678fed924d6c03283d Dec 09 15:14:50 crc kubenswrapper[4716]: I1209 15:14:50.531438 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-7566f64cd7-tmljw"] Dec 09 15:14:50 crc kubenswrapper[4716]: W1209 15:14:50.534193 4716 manager.go:1169] Failed to process watch event {EventType:0 
Dec 09 15:14:50 crc kubenswrapper[4716]: W1209 15:14:50.534193 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode18650db_9d90_47d6_a850_70643a99837a.slice/crio-c101f1821bb9bf06117676cf1124330f8568d432d1cc85c34196de9458c28759 WatchSource:0}: Error finding container c101f1821bb9bf06117676cf1124330f8568d432d1cc85c34196de9458c28759: Status 404 returned error can't find the container with id c101f1821bb9bf06117676cf1124330f8568d432d1cc85c34196de9458c28759
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.370900 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"8c97697cc418655fecebbb5209232be366b84deb67f1f6678fed924d6c03283d"}
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.374159 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5" event={"ID":"ebb184a7-5322-40ab-8636-4b1d78882422","Type":"ContainerStarted","Data":"7a7cd6960365457873a3556d00a7e2cdf08b914be992426c3ea6cb9e7e5e21f8"}
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.374188 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5" event={"ID":"ebb184a7-5322-40ab-8636-4b1d78882422","Type":"ContainerStarted","Data":"3149138d4d8a24a9aec0a3f51c328d9ba1e33ddfe1f19e209be16c9392a8a55e"}
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.377894 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-6dz7r" event={"ID":"5cc5cfca-7c35-49ed-9b4a-d4416082b264","Type":"ContainerStarted","Data":"1b4ed3666690fe1cda0a0e42ea72a2d21e7160206ea4ca9495b87bafc9a140d1"}
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.379382 4716 generic.go:334] "Generic (PLEG): container finished" podID="0c47b1a7-bbe0-4381-9276-58fa98d7f2bc" containerID="9bdba60400c5391eae4474dac20bf37ac36bad48d1daf08251130f9f4138864e" exitCode=0
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.379468 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-s6sf4" event={"ID":"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc","Type":"ContainerDied","Data":"9bdba60400c5391eae4474dac20bf37ac36bad48d1daf08251130f9f4138864e"}
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.380641 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"c101f1821bb9bf06117676cf1124330f8568d432d1cc85c34196de9458c28759"}
Dec 09 15:14:51 crc kubenswrapper[4716]: I1209 15:14:51.404064 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-hk6h5" podStartSLOduration=2.635220645 podStartE2EDuration="4.404034193s" podCreationTimestamp="2025-12-09 15:14:47 +0000 UTC" firstStartedPulling="2025-12-09 15:14:48.297438561 +0000 UTC m=+375.452182549" lastFinishedPulling="2025-12-09 15:14:50.066252109 +0000 UTC m=+377.220996097" observedRunningTime="2025-12-09 15:14:51.39544669 +0000 UTC m=+378.550190668" watchObservedRunningTime="2025-12-09 15:14:51.404034193 +0000 UTC m=+378.558778181"
podCreationTimestamp="2025-12-09 15:14:47 +0000 UTC" firstStartedPulling="2025-12-09 15:14:48.883919259 +0000 UTC m=+376.038663247" lastFinishedPulling="2025-12-09 15:14:51.115523969 +0000 UTC m=+378.270267957" observedRunningTime="2025-12-09 15:14:51.413683807 +0000 UTC m=+378.568427815" watchObservedRunningTime="2025-12-09 15:14:51.426877646 +0000 UTC m=+378.581621634" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.323959 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5b48c78497-llxwq"] Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.325712 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.326306 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-serving-cert\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.326370 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-service-ca\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.326420 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86t25\" (UniqueName: \"kubernetes.io/projected/87dab013-ee64-49d5-abd0-6cdaec27805c-kube-api-access-86t25\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.326807 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-oauth-config\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.326880 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-oauth-serving-cert\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.327044 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-console-config\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.327090 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-trusted-ca-bundle\") pod \"console-5b48c78497-llxwq\" (UID: 
\"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.346650 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5b48c78497-llxwq"] Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.391562 4716 generic.go:334] "Generic (PLEG): container finished" podID="65f1aaad-6876-4c62-8969-dddcc323b532" containerID="45d24e106d086ddb3a78d13e9921bff56d04be504d03ef7ae7d8f59d28aacb90" exitCode=0 Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.391735 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerDied","Data":"45d24e106d086ddb3a78d13e9921bff56d04be504d03ef7ae7d8f59d28aacb90"} Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.407978 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-s6sf4" event={"ID":"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc","Type":"ContainerStarted","Data":"6551dbf21c756bb233ea88c07a4fe718b3f2bba389a73e4773402b30af5e35a6"} Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.408086 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-s6sf4" event={"ID":"0c47b1a7-bbe0-4381-9276-58fa98d7f2bc","Type":"ContainerStarted","Data":"3cd0fae39fd2fdad7c59ac14ba794792f8ed70fd0483047cfec7e3bb1f54227d"} Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.428308 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-console-config\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.428371 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-trusted-ca-bundle\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.428427 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-serving-cert\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.429817 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-console-config\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.429881 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-trusted-ca-bundle\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.429986 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-service-ca\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.430063 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86t25\" (UniqueName: \"kubernetes.io/projected/87dab013-ee64-49d5-abd0-6cdaec27805c-kube-api-access-86t25\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.430163 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-oauth-config\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.430208 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-oauth-serving-cert\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.431087 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-service-ca\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.431527 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-oauth-serving-cert\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.436517 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-oauth-config\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.437178 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-serving-cert\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.450943 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86t25\" (UniqueName: \"kubernetes.io/projected/87dab013-ee64-49d5-abd0-6cdaec27805c-kube-api-access-86t25\") pod \"console-5b48c78497-llxwq\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") " pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.468535 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-monitoring/node-exporter-s6sf4" podStartSLOduration=3.378451063 podStartE2EDuration="5.4685069s" podCreationTimestamp="2025-12-09 15:14:47 +0000 UTC" firstStartedPulling="2025-12-09 15:14:47.969907517 +0000 UTC m=+375.124651505" lastFinishedPulling="2025-12-09 15:14:50.059963354 +0000 UTC m=+377.214707342" observedRunningTime="2025-12-09 15:14:52.467582513 +0000 UTC m=+379.622326511" watchObservedRunningTime="2025-12-09 15:14:52.4685069 +0000 UTC m=+379.623250888" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.651668 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.840447 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-565dc599f6-zdjv2"] Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.841247 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.849846 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.849874 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.849892 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.850203 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-3i5j24slc1ut1" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.850419 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.853223 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-7ml6s" Dec 09 15:14:52 crc kubenswrapper[4716]: I1209 15:14:52.886903 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-565dc599f6-zdjv2"] Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.041887 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbwlm\" (UniqueName: \"kubernetes.io/projected/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-kube-api-access-sbwlm\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.041992 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-audit-log\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.042031 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-client-ca-bundle\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " 
pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.042107 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-secret-metrics-client-certs\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.042150 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.042193 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-secret-metrics-server-tls\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.042217 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-metrics-server-audit-profiles\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144449 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-secret-metrics-client-certs\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144527 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144574 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-secret-metrics-server-tls\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144601 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-metrics-server-audit-profiles\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " 
pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144655 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbwlm\" (UniqueName: \"kubernetes.io/projected/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-kube-api-access-sbwlm\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144702 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-audit-log\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.144726 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-client-ca-bundle\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.145967 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.146599 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-audit-log\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.149230 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-metrics-server-audit-profiles\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.151201 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-client-ca-bundle\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.152526 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-secret-metrics-server-tls\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.154521 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: 
\"kubernetes.io/secret/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-secret-metrics-client-certs\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.161753 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbwlm\" (UniqueName: \"kubernetes.io/projected/1ea0dd9f-12f0-4906-8b21-604d66ac8d41-kube-api-access-sbwlm\") pod \"metrics-server-565dc599f6-zdjv2\" (UID: \"1ea0dd9f-12f0-4906-8b21-604d66ac8d41\") " pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.164754 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.322508 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w"] Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.323637 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.328443 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.328559 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.342651 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w"] Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.450695 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/80f5f5d3-4060-4ef8-a164-522526390e5c-monitoring-plugin-cert\") pod \"monitoring-plugin-588c6cf68c-pzb5w\" (UID: \"80f5f5d3-4060-4ef8-a164-522526390e5c\") " pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.561030 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/80f5f5d3-4060-4ef8-a164-522526390e5c-monitoring-plugin-cert\") pod \"monitoring-plugin-588c6cf68c-pzb5w\" (UID: \"80f5f5d3-4060-4ef8-a164-522526390e5c\") " pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.567797 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/80f5f5d3-4060-4ef8-a164-522526390e5c-monitoring-plugin-cert\") pod \"monitoring-plugin-588c6cf68c-pzb5w\" (UID: \"80f5f5d3-4060-4ef8-a164-522526390e5c\") " pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.666061 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.786449 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5b48c78497-llxwq"] Dec 09 15:14:53 crc kubenswrapper[4716]: W1209 15:14:53.792930 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87dab013_ee64_49d5_abd0_6cdaec27805c.slice/crio-cd54d61b73b0fc74709d713e920ac9c1324e8754bd083e3240928750f9ab2858 WatchSource:0}: Error finding container cd54d61b73b0fc74709d713e920ac9c1324e8754bd083e3240928750f9ab2858: Status 404 returned error can't find the container with id cd54d61b73b0fc74709d713e920ac9c1324e8754bd083e3240928750f9ab2858 Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.848425 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-565dc599f6-zdjv2"] Dec 09 15:14:53 crc kubenswrapper[4716]: W1209 15:14:53.873527 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ea0dd9f_12f0_4906_8b21_604d66ac8d41.slice/crio-c3ba9ce8718858700bbec2a9ff22cee462226da74b69ab1508e01f33f13efd50 WatchSource:0}: Error finding container c3ba9ce8718858700bbec2a9ff22cee462226da74b69ab1508e01f33f13efd50: Status 404 returned error can't find the container with id c3ba9ce8718858700bbec2a9ff22cee462226da74b69ab1508e01f33f13efd50 Dec 09 15:14:53 crc kubenswrapper[4716]: I1209 15:14:53.946048 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w"] Dec 09 15:14:53 crc kubenswrapper[4716]: W1209 15:14:53.953463 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80f5f5d3_4060_4ef8_a164_522526390e5c.slice/crio-185eb0f6f57c7ffb2eb6933cdca4171970e4226acc3ceffad63ddb9da1fb1164 WatchSource:0}: Error finding container 185eb0f6f57c7ffb2eb6933cdca4171970e4226acc3ceffad63ddb9da1fb1164: Status 404 returned error can't find the container with id 185eb0f6f57c7ffb2eb6933cdca4171970e4226acc3ceffad63ddb9da1fb1164 Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.026831 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.029913 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.032585 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.032773 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.033036 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.033107 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.036366 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-9wfpr" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.036817 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-b3nl4u33gbdp5" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.037221 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.037475 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.038042 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.038287 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.043652 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.046736 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.050869 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.059922 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.071847 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.071938 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.071981 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072015 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072047 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072094 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-web-config\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072122 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-config-out\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072145 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-config\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072172 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072201 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072451 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxf94\" (UniqueName: \"kubernetes.io/projected/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-kube-api-access-bxf94\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072481 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072515 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072537 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072607 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072655 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.072680 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174134 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-web-config\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0"
\"kubernetes.io/empty-dir/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-config-out\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174791 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-config\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174820 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174849 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174938 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxf94\" (UniqueName: \"kubernetes.io/projected/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-kube-api-access-bxf94\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174961 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.174993 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175023 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175061 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175093 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" 
(UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175126 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175173 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175205 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175239 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175274 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175304 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.175340 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.178036 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.178902 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.182128 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-config-out\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.182644 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.182987 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-config\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.183073 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-web-config\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.183653 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.183831 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.185921 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.186047 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.186497 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: 
\"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.186616 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.188833 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.189593 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.189963 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.193776 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.194104 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.198916 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxf94\" (UniqueName: \"kubernetes.io/projected/53fb541d-7fe5-4b5f-a2cc-4c66b394b157-kube-api-access-bxf94\") pod \"prometheus-k8s-0\" (UID: \"53fb541d-7fe5-4b5f-a2cc-4c66b394b157\") " pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.361059 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.435060 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" event={"ID":"1ea0dd9f-12f0-4906-8b21-604d66ac8d41","Type":"ContainerStarted","Data":"c3ba9ce8718858700bbec2a9ff22cee462226da74b69ab1508e01f33f13efd50"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.437571 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b48c78497-llxwq" event={"ID":"87dab013-ee64-49d5-abd0-6cdaec27805c","Type":"ContainerStarted","Data":"bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.437617 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b48c78497-llxwq" event={"ID":"87dab013-ee64-49d5-abd0-6cdaec27805c","Type":"ContainerStarted","Data":"cd54d61b73b0fc74709d713e920ac9c1324e8754bd083e3240928750f9ab2858"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.446372 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"259d49df5a02d9033d227888da76d1c38dafc68ee2074038408f3b587c622b36"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.446430 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"5f48b31a3fd1f56110c1ddee5b84de5a6c2aba40b55b83408995b49cbad7e4c3"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.446444 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"de5d0c7b03b8870c806e7412bcc14d1b138deed50a224b2014e55c8a8c6e0ca6"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.447938 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" event={"ID":"80f5f5d3-4060-4ef8-a164-522526390e5c","Type":"ContainerStarted","Data":"185eb0f6f57c7ffb2eb6933cdca4171970e4226acc3ceffad63ddb9da1fb1164"} Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.466993 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5b48c78497-llxwq" podStartSLOduration=2.466969198 podStartE2EDuration="2.466969198s" podCreationTimestamp="2025-12-09 15:14:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:14:54.462867537 +0000 UTC m=+381.617611535" watchObservedRunningTime="2025-12-09 15:14:54.466969198 +0000 UTC m=+381.621713186" Dec 09 15:14:54 crc kubenswrapper[4716]: I1209 15:14:54.880764 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Dec 09 15:14:57 crc kubenswrapper[4716]: I1209 15:14:57.474659 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"21489e43fc79da2629328befe75bdbda7a850b0826104b33304ca6350626e084"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.483394 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" event={"ID":"80f5f5d3-4060-4ef8-a164-522526390e5c","Type":"ContainerStarted","Data":"9376a2a9a63e7b5b253464ef411ee4033631f61ed1ba8df2bc4f868adc8d773b"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.484868 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.489594 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"4d1e3b5c4e848e1f4485d492299fadd32c002f2bfb22c78caa784d3b9ec69aed"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.489682 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"3099f19850e2e59c044d0492941ab1fe3f8dfbdf9422e308a04787ff30bafd08"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.489694 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" event={"ID":"e18650db-9d90-47d6-a850-70643a99837a","Type":"ContainerStarted","Data":"2a07e6bc737a1b7e3626bf45c76deb785c9c9c33ea82c7f4c050e9acd9a02053"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.491379 4716 generic.go:334] "Generic (PLEG): container finished" podID="53fb541d-7fe5-4b5f-a2cc-4c66b394b157" containerID="639c6682eff35063c3d22d08679eae18a13c3e97c22c2fe0752a1493fa17b02e" exitCode=0 Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.491481 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerDied","Data":"639c6682eff35063c3d22d08679eae18a13c3e97c22c2fe0752a1493fa17b02e"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.494391 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" event={"ID":"1ea0dd9f-12f0-4906-8b21-604d66ac8d41","Type":"ContainerStarted","Data":"22e35bf3573955153ec76a824280ca96e2ab62af572d7957171422b211e5f1ac"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.509975 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"c63f228795c6ee7b153bb61f04e39586d450e5c7a287c9bcd2efc603b3bf6f68"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.510039 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"d3b7dd00413b12a0c3374db9ac7addcf746e2b50a28cc4def6a17b3396bf9c43"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.510087 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"26a8d15c8c289ccac1e981d7c81ff745f376c6c36ba1eb0e160711c3078d3806"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.510097 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" 
event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"6bcee13250ac9b74bc9002b93e65df1e47aa4fa22c9df20f174026d251697c02"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.510106 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"a6c97fd865380beae4f7c3351c96001507ad07efeaa9ef8cbfd3847bcae1a815"} Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.518281 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" podStartSLOduration=2.193832513 podStartE2EDuration="5.518268096s" podCreationTimestamp="2025-12-09 15:14:53 +0000 UTC" firstStartedPulling="2025-12-09 15:14:53.961295722 +0000 UTC m=+381.116039710" lastFinishedPulling="2025-12-09 15:14:57.285731285 +0000 UTC m=+384.440475293" observedRunningTime="2025-12-09 15:14:58.501502832 +0000 UTC m=+385.656246820" watchObservedRunningTime="2025-12-09 15:14:58.518268096 +0000 UTC m=+385.673012074" Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.562015 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" podStartSLOduration=3.172270528 podStartE2EDuration="6.561986815s" podCreationTimestamp="2025-12-09 15:14:52 +0000 UTC" firstStartedPulling="2025-12-09 15:14:53.880681496 +0000 UTC m=+381.035425484" lastFinishedPulling="2025-12-09 15:14:57.270397773 +0000 UTC m=+384.425141771" observedRunningTime="2025-12-09 15:14:58.520972216 +0000 UTC m=+385.675716204" watchObservedRunningTime="2025-12-09 15:14:58.561986815 +0000 UTC m=+385.716730803" Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.591673 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" podStartSLOduration=2.8692915770000003 podStartE2EDuration="9.591643989s" podCreationTimestamp="2025-12-09 15:14:49 +0000 UTC" firstStartedPulling="2025-12-09 15:14:50.545238948 +0000 UTC m=+377.699982936" lastFinishedPulling="2025-12-09 15:14:57.26759135 +0000 UTC m=+384.422335348" observedRunningTime="2025-12-09 15:14:58.586407975 +0000 UTC m=+385.741151963" watchObservedRunningTime="2025-12-09 15:14:58.591643989 +0000 UTC m=+385.746387977" Dec 09 15:14:58 crc kubenswrapper[4716]: I1209 15:14:58.604374 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" Dec 09 15:14:59 crc kubenswrapper[4716]: I1209 15:14:59.524124 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"65f1aaad-6876-4c62-8969-dddcc323b532","Type":"ContainerStarted","Data":"2c2b924654b834c25941f1d4ebe5ef65261ab3e625b4f866e976cb7da37fa49c"} Dec 09 15:14:59 crc kubenswrapper[4716]: I1209 15:14:59.526358 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:59 crc kubenswrapper[4716]: I1209 15:14:59.539141 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-7566f64cd7-tmljw" Dec 09 15:14:59 crc kubenswrapper[4716]: I1209 15:14:59.584379 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=4.7190721369999995 podStartE2EDuration="11.584335051s" 
podCreationTimestamp="2025-12-09 15:14:48 +0000 UTC" firstStartedPulling="2025-12-09 15:14:50.403373467 +0000 UTC m=+377.558117455" lastFinishedPulling="2025-12-09 15:14:57.268636381 +0000 UTC m=+384.423380369" observedRunningTime="2025-12-09 15:14:59.559511109 +0000 UTC m=+386.714255117" watchObservedRunningTime="2025-12-09 15:14:59.584335051 +0000 UTC m=+386.739079039" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.183560 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj"] Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.185155 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.190212 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.190613 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.191446 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj"] Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.204511 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/be35eec6-211e-4a8e-8af2-77699f4e5953-secret-volume\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.204601 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/be35eec6-211e-4a8e-8af2-77699f4e5953-config-volume\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.204698 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k22d\" (UniqueName: \"kubernetes.io/projected/be35eec6-211e-4a8e-8af2-77699f4e5953-kube-api-access-8k22d\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.306683 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/be35eec6-211e-4a8e-8af2-77699f4e5953-secret-volume\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.306735 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/be35eec6-211e-4a8e-8af2-77699f4e5953-config-volume\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 
15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.306805 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k22d\" (UniqueName: \"kubernetes.io/projected/be35eec6-211e-4a8e-8af2-77699f4e5953-kube-api-access-8k22d\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.310168 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/be35eec6-211e-4a8e-8af2-77699f4e5953-config-volume\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.315238 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/be35eec6-211e-4a8e-8af2-77699f4e5953-secret-volume\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.326571 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k22d\" (UniqueName: \"kubernetes.io/projected/be35eec6-211e-4a8e-8af2-77699f4e5953-kube-api-access-8k22d\") pod \"collect-profiles-29421555-9mxcj\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:00 crc kubenswrapper[4716]: I1209 15:15:00.519473 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:02 crc kubenswrapper[4716]: I1209 15:15:02.652461 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:15:02 crc kubenswrapper[4716]: I1209 15:15:02.653050 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:15:02 crc kubenswrapper[4716]: I1209 15:15:02.658651 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:15:03 crc kubenswrapper[4716]: I1209 15:15:03.575747 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"05cf39b5ec104b6d9e689d7c1f824206f5a20c7c7322b73fce819c3cbd0dd373"} Dec 09 15:15:03 crc kubenswrapper[4716]: I1209 15:15:03.580304 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5b48c78497-llxwq" Dec 09 15:15:03 crc kubenswrapper[4716]: I1209 15:15:03.658063 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ckjw6"] Dec 09 15:15:03 crc kubenswrapper[4716]: I1209 15:15:03.892596 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj"] Dec 09 15:15:03 crc kubenswrapper[4716]: W1209 15:15:03.902698 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe35eec6_211e_4a8e_8af2_77699f4e5953.slice/crio-57c69ab4446c9bf3b655b3bc5b2e6612d444b855fe8746354639032c20616c99 WatchSource:0}: Error finding container 57c69ab4446c9bf3b655b3bc5b2e6612d444b855fe8746354639032c20616c99: Status 404 returned error can't find the container with id 57c69ab4446c9bf3b655b3bc5b2e6612d444b855fe8746354639032c20616c99 Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.591218 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"3cd19ba18ea88ef45186b9f7683bde39f5bb7bc2febebac2fe9caebfa32f51f7"} Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.591562 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"696132ca58d5c91ae5dbdb9d98f04f3ab16a21094c2dc15a46f4630d9bc568bb"} Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.591574 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"ca452fcf84eb6cfceb36a5b5bd0f540bb64533c396cb39dcb9a592eea8f4d951"} Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.591586 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"a097d1f10f66dab47fdac7cb05b621581afd97cfaccc77f41a69363e060245ad"} Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.593006 4716 generic.go:334] "Generic (PLEG): container finished" podID="be35eec6-211e-4a8e-8af2-77699f4e5953" 
containerID="d24df05d7fab51af97ebe0ecb9f2e8437c0a5a04bc3596395bd64614a1b979cc" exitCode=0 Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.593082 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" event={"ID":"be35eec6-211e-4a8e-8af2-77699f4e5953","Type":"ContainerDied","Data":"d24df05d7fab51af97ebe0ecb9f2e8437c0a5a04bc3596395bd64614a1b979cc"} Dec 09 15:15:04 crc kubenswrapper[4716]: I1209 15:15:04.593159 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" event={"ID":"be35eec6-211e-4a8e-8af2-77699f4e5953","Type":"ContainerStarted","Data":"57c69ab4446c9bf3b655b3bc5b2e6612d444b855fe8746354639032c20616c99"} Dec 09 15:15:05 crc kubenswrapper[4716]: I1209 15:15:05.607029 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"53fb541d-7fe5-4b5f-a2cc-4c66b394b157","Type":"ContainerStarted","Data":"c3902a87f49aea4c21c23d3e280fe37b122daa8a454c5efe29adef031d6ee760"} Dec 09 15:15:05 crc kubenswrapper[4716]: I1209 15:15:05.924208 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:05 crc kubenswrapper[4716]: I1209 15:15:05.957150 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=7.1983142860000005 podStartE2EDuration="11.95710939s" podCreationTimestamp="2025-12-09 15:14:54 +0000 UTC" firstStartedPulling="2025-12-09 15:14:58.495537746 +0000 UTC m=+385.650281734" lastFinishedPulling="2025-12-09 15:15:03.25433285 +0000 UTC m=+390.409076838" observedRunningTime="2025-12-09 15:15:05.64710122 +0000 UTC m=+392.801845208" watchObservedRunningTime="2025-12-09 15:15:05.95710939 +0000 UTC m=+393.111853398" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.015659 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/be35eec6-211e-4a8e-8af2-77699f4e5953-config-volume\") pod \"be35eec6-211e-4a8e-8af2-77699f4e5953\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.016112 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k22d\" (UniqueName: \"kubernetes.io/projected/be35eec6-211e-4a8e-8af2-77699f4e5953-kube-api-access-8k22d\") pod \"be35eec6-211e-4a8e-8af2-77699f4e5953\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.016182 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/be35eec6-211e-4a8e-8af2-77699f4e5953-secret-volume\") pod \"be35eec6-211e-4a8e-8af2-77699f4e5953\" (UID: \"be35eec6-211e-4a8e-8af2-77699f4e5953\") " Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.016885 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be35eec6-211e-4a8e-8af2-77699f4e5953-config-volume" (OuterVolumeSpecName: "config-volume") pod "be35eec6-211e-4a8e-8af2-77699f4e5953" (UID: "be35eec6-211e-4a8e-8af2-77699f4e5953"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.023646 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be35eec6-211e-4a8e-8af2-77699f4e5953-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "be35eec6-211e-4a8e-8af2-77699f4e5953" (UID: "be35eec6-211e-4a8e-8af2-77699f4e5953"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.023660 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be35eec6-211e-4a8e-8af2-77699f4e5953-kube-api-access-8k22d" (OuterVolumeSpecName: "kube-api-access-8k22d") pod "be35eec6-211e-4a8e-8af2-77699f4e5953" (UID: "be35eec6-211e-4a8e-8af2-77699f4e5953"). InnerVolumeSpecName "kube-api-access-8k22d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.118785 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/be35eec6-211e-4a8e-8af2-77699f4e5953-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.118831 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k22d\" (UniqueName: \"kubernetes.io/projected/be35eec6-211e-4a8e-8af2-77699f4e5953-kube-api-access-8k22d\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.118842 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/be35eec6-211e-4a8e-8af2-77699f4e5953-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.616578 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" event={"ID":"be35eec6-211e-4a8e-8af2-77699f4e5953","Type":"ContainerDied","Data":"57c69ab4446c9bf3b655b3bc5b2e6612d444b855fe8746354639032c20616c99"} Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.616726 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57c69ab4446c9bf3b655b3bc5b2e6612d444b855fe8746354639032c20616c99" Dec 09 15:15:06 crc kubenswrapper[4716]: I1209 15:15:06.616608 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj" Dec 09 15:15:09 crc kubenswrapper[4716]: I1209 15:15:09.361883 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:15:13 crc kubenswrapper[4716]: I1209 15:15:13.165012 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:15:13 crc kubenswrapper[4716]: I1209 15:15:13.165116 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:15:17 crc kubenswrapper[4716]: I1209 15:15:17.922774 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:15:17 crc kubenswrapper[4716]: I1209 15:15:17.923154 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:15:28 crc kubenswrapper[4716]: I1209 15:15:28.706872 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-ckjw6" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" containerID="cri-o://9c52210cf493d1dc4e76d0992dddb115f687b098b448d6a7635c3bc00b8c9d69" gracePeriod=15 Dec 09 15:15:28 crc kubenswrapper[4716]: I1209 15:15:28.961513 4716 patch_prober.go:28] interesting pod/console-f9d7485db-ckjw6 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Dec 09 15:15:28 crc kubenswrapper[4716]: I1209 15:15:28.962066 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-ckjw6" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.800111 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ckjw6_8c6090d1-1d37-4305-9cbf-3c76c3237777/console/0.log" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.800550 4716 generic.go:334] "Generic (PLEG): container finished" podID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerID="9c52210cf493d1dc4e76d0992dddb115f687b098b448d6a7635c3bc00b8c9d69" exitCode=2 Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.800596 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ckjw6" event={"ID":"8c6090d1-1d37-4305-9cbf-3c76c3237777","Type":"ContainerDied","Data":"9c52210cf493d1dc4e76d0992dddb115f687b098b448d6a7635c3bc00b8c9d69"} Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.800650 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ckjw6" 
event={"ID":"8c6090d1-1d37-4305-9cbf-3c76c3237777","Type":"ContainerDied","Data":"1b19a061f0efd05c9f456caa35a1996e225c681a63eb4b787d85536290d36453"} Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.800662 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b19a061f0efd05c9f456caa35a1996e225c681a63eb4b787d85536290d36453" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.805978 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ckjw6_8c6090d1-1d37-4305-9cbf-3c76c3237777/console/0.log" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.806101 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971075 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-service-ca\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971283 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-647nf\" (UniqueName: \"kubernetes.io/projected/8c6090d1-1d37-4305-9cbf-3c76c3237777-kube-api-access-647nf\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971389 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-trusted-ca-bundle\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971428 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-serving-cert\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971495 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-config\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971549 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-oauth-serving-cert\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.971572 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-oauth-config\") pod \"8c6090d1-1d37-4305-9cbf-3c76c3237777\" (UID: \"8c6090d1-1d37-4305-9cbf-3c76c3237777\") " Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.972340 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-trusted-ca-bundle" (OuterVolumeSpecName: 
"trusted-ca-bundle") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.972363 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-config" (OuterVolumeSpecName: "console-config") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.972351 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-service-ca" (OuterVolumeSpecName: "service-ca") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.972859 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.980238 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.980642 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c6090d1-1d37-4305-9cbf-3c76c3237777-kube-api-access-647nf" (OuterVolumeSpecName: "kube-api-access-647nf") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "kube-api-access-647nf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:15:29 crc kubenswrapper[4716]: I1209 15:15:29.980813 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "8c6090d1-1d37-4305-9cbf-3c76c3237777" (UID: "8c6090d1-1d37-4305-9cbf-3c76c3237777"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074439 4716 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074490 4716 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074500 4716 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074510 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-service-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074518 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-647nf\" (UniqueName: \"kubernetes.io/projected/8c6090d1-1d37-4305-9cbf-3c76c3237777-kube-api-access-647nf\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074530 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8c6090d1-1d37-4305-9cbf-3c76c3237777-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.074539 4716 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8c6090d1-1d37-4305-9cbf-3c76c3237777-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.806932 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-ckjw6" Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.850953 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ckjw6"] Dec 09 15:15:30 crc kubenswrapper[4716]: I1209 15:15:30.857366 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-ckjw6"] Dec 09 15:15:31 crc kubenswrapper[4716]: I1209 15:15:31.223948 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" path="/var/lib/kubelet/pods/8c6090d1-1d37-4305-9cbf-3c76c3237777/volumes" Dec 09 15:15:33 crc kubenswrapper[4716]: I1209 15:15:33.173407 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:15:33 crc kubenswrapper[4716]: I1209 15:15:33.179041 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-565dc599f6-zdjv2" Dec 09 15:15:47 crc kubenswrapper[4716]: I1209 15:15:47.933436 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:15:47 crc kubenswrapper[4716]: I1209 15:15:47.934872 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:15:47 crc kubenswrapper[4716]: I1209 15:15:47.935273 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:15:47 crc kubenswrapper[4716]: I1209 15:15:47.936416 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4c6e76c9ca1315384ba9656c51a130473b399015c21d538dc1786fee74edb164"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:15:47 crc kubenswrapper[4716]: I1209 15:15:47.936506 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://4c6e76c9ca1315384ba9656c51a130473b399015c21d538dc1786fee74edb164" gracePeriod=600 Dec 09 15:15:48 crc kubenswrapper[4716]: I1209 15:15:48.943677 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="4c6e76c9ca1315384ba9656c51a130473b399015c21d538dc1786fee74edb164" exitCode=0 Dec 09 15:15:48 crc kubenswrapper[4716]: I1209 15:15:48.943803 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"4c6e76c9ca1315384ba9656c51a130473b399015c21d538dc1786fee74edb164"} Dec 09 15:15:48 crc kubenswrapper[4716]: I1209 15:15:48.944492 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"7ac024156aad5546738363991b9b7ef9a0dffb92b890f8d72d6755b45adfb383"} Dec 09 15:15:48 crc kubenswrapper[4716]: I1209 15:15:48.944519 4716 scope.go:117] "RemoveContainer" containerID="28492ec968b29725d6ed0428cfda7f0b7c95ecb25faa6dd1c8a98cdc62dfccce" Dec 09 15:15:54 crc kubenswrapper[4716]: I1209 15:15:54.361520 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:15:54 crc kubenswrapper[4716]: I1209 15:15:54.401298 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:15:55 crc kubenswrapper[4716]: I1209 15:15:55.038893 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.358527 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-76868b5959-ctmcp"] Dec 09 15:16:06 crc kubenswrapper[4716]: E1209 15:16:06.359514 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.359537 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" Dec 09 15:16:06 crc kubenswrapper[4716]: E1209 15:16:06.359547 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be35eec6-211e-4a8e-8af2-77699f4e5953" containerName="collect-profiles" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.359554 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="be35eec6-211e-4a8e-8af2-77699f4e5953" containerName="collect-profiles" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.359780 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="be35eec6-211e-4a8e-8af2-77699f4e5953" containerName="collect-profiles" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.359797 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c6090d1-1d37-4305-9cbf-3c76c3237777" containerName="console" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.360405 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.378507 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-76868b5959-ctmcp"] Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.380775 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-serving-cert\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.380821 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-console-config\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.380882 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-oauth-config\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.380918 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-trusted-ca-bundle\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.380948 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vtg6\" (UniqueName: \"kubernetes.io/projected/d81edbe4-5c43-454c-b78d-396a1dc373af-kube-api-access-5vtg6\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.381115 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-oauth-serving-cert\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.381281 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-service-ca\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.482551 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-serving-cert\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc 
kubenswrapper[4716]: I1209 15:16:06.482613 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-console-config\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.482698 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-oauth-config\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.482736 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-trusted-ca-bundle\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.482773 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vtg6\" (UniqueName: \"kubernetes.io/projected/d81edbe4-5c43-454c-b78d-396a1dc373af-kube-api-access-5vtg6\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.482796 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-oauth-serving-cert\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.482839 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-service-ca\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.484283 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-oauth-serving-cert\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.484295 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-console-config\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.484296 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-service-ca\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp" Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.484845 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-trusted-ca-bundle\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.494522 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-serving-cert\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.499992 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-oauth-config\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.507873 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vtg6\" (UniqueName: \"kubernetes.io/projected/d81edbe4-5c43-454c-b78d-396a1dc373af-kube-api-access-5vtg6\") pod \"console-76868b5959-ctmcp\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") " pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:06 crc kubenswrapper[4716]: I1209 15:16:06.692168 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:07 crc kubenswrapper[4716]: I1209 15:16:07.194926 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-76868b5959-ctmcp"]
Dec 09 15:16:08 crc kubenswrapper[4716]: I1209 15:16:08.119372 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-76868b5959-ctmcp" event={"ID":"d81edbe4-5c43-454c-b78d-396a1dc373af","Type":"ContainerStarted","Data":"56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298"}
Dec 09 15:16:08 crc kubenswrapper[4716]: I1209 15:16:08.119884 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-76868b5959-ctmcp" event={"ID":"d81edbe4-5c43-454c-b78d-396a1dc373af","Type":"ContainerStarted","Data":"10ddef4f25fdbd5cf18a73429066a9f40f191b016003726b9888d796f1c5aac8"}
Dec 09 15:16:08 crc kubenswrapper[4716]: I1209 15:16:08.147835 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-76868b5959-ctmcp" podStartSLOduration=2.147779958 podStartE2EDuration="2.147779958s" podCreationTimestamp="2025-12-09 15:16:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:16:08.144683027 +0000 UTC m=+455.299427035" watchObservedRunningTime="2025-12-09 15:16:08.147779958 +0000 UTC m=+455.302523946"
Dec 09 15:16:16 crc kubenswrapper[4716]: I1209 15:16:16.692671 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:16 crc kubenswrapper[4716]: I1209 15:16:16.693588 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:16 crc kubenswrapper[4716]: I1209 15:16:16.703457 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:17 crc kubenswrapper[4716]: I1209 15:16:17.198762 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:16:17 crc kubenswrapper[4716]: I1209 15:16:17.270531 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5b48c78497-llxwq"]
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.318614 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5b48c78497-llxwq" podUID="87dab013-ee64-49d5-abd0-6cdaec27805c" containerName="console" containerID="cri-o://bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f" gracePeriod=15
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.692078 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5b48c78497-llxwq_87dab013-ee64-49d5-abd0-6cdaec27805c/console/0.log"
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.692517 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5b48c78497-llxwq"
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746059 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-serving-cert\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746126 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-oauth-serving-cert\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746152 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-console-config\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746210 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-service-ca\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746244 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86t25\" (UniqueName: \"kubernetes.io/projected/87dab013-ee64-49d5-abd0-6cdaec27805c-kube-api-access-86t25\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746300 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-trusted-ca-bundle\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.746328 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-oauth-config\") pod \"87dab013-ee64-49d5-abd0-6cdaec27805c\" (UID: \"87dab013-ee64-49d5-abd0-6cdaec27805c\") "
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.747605 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.747740 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-service-ca" (OuterVolumeSpecName: "service-ca") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.747759 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.748222 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-console-config" (OuterVolumeSpecName: "console-config") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.754027 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.754160 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87dab013-ee64-49d5-abd0-6cdaec27805c-kube-api-access-86t25" (OuterVolumeSpecName: "kube-api-access-86t25") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "kube-api-access-86t25". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.754915 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "87dab013-ee64-49d5-abd0-6cdaec27805c" (UID: "87dab013-ee64-49d5-abd0-6cdaec27805c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849100 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86t25\" (UniqueName: \"kubernetes.io/projected/87dab013-ee64-49d5-abd0-6cdaec27805c-kube-api-access-86t25\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849144 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849157 4716 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849167 4716 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/87dab013-ee64-49d5-abd0-6cdaec27805c-console-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849177 4716 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849185 4716 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-console-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:42 crc kubenswrapper[4716]: I1209 15:16:42.849197 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/87dab013-ee64-49d5-abd0-6cdaec27805c-service-ca\") on node \"crc\" DevicePath \"\""
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.366035 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5b48c78497-llxwq_87dab013-ee64-49d5-abd0-6cdaec27805c/console/0.log"
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.366102 4716 generic.go:334] "Generic (PLEG): container finished" podID="87dab013-ee64-49d5-abd0-6cdaec27805c" containerID="bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f" exitCode=2
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.366147 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b48c78497-llxwq" event={"ID":"87dab013-ee64-49d5-abd0-6cdaec27805c","Type":"ContainerDied","Data":"bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f"}
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.366197 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b48c78497-llxwq" event={"ID":"87dab013-ee64-49d5-abd0-6cdaec27805c","Type":"ContainerDied","Data":"cd54d61b73b0fc74709d713e920ac9c1324e8754bd083e3240928750f9ab2858"}
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.366231 4716 scope.go:117] "RemoveContainer" containerID="bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f"
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.366232 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5b48c78497-llxwq"
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.389565 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5b48c78497-llxwq"]
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.393883 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5b48c78497-llxwq"]
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.398217 4716 scope.go:117] "RemoveContainer" containerID="bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f"
Dec 09 15:16:43 crc kubenswrapper[4716]: E1209 15:16:43.398899 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f\": container with ID starting with bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f not found: ID does not exist" containerID="bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f"
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.399198 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f"} err="failed to get container status \"bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f\": rpc error: code = NotFound desc = could not find container \"bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f\": container with ID starting with bb40daa498c71fc1f070403e2f7498400e51e35982830c666765e873b3a80e0f not found: ID does not exist"
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.653718 4716 patch_prober.go:28] interesting pod/console-5b48c78497-llxwq container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.77:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 09 15:16:43 crc kubenswrapper[4716]: I1209 15:16:43.653817 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-5b48c78497-llxwq" podUID="87dab013-ee64-49d5-abd0-6cdaec27805c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.77:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 09 15:16:45 crc kubenswrapper[4716]: I1209 15:16:45.221830 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87dab013-ee64-49d5-abd0-6cdaec27805c" path="/var/lib/kubelet/pods/87dab013-ee64-49d5-abd0-6cdaec27805c/volumes"
Dec 09 15:17:33 crc kubenswrapper[4716]: I1209 15:17:33.390846 4716 scope.go:117] "RemoveContainer" containerID="9c52210cf493d1dc4e76d0992dddb115f687b098b448d6a7635c3bc00b8c9d69"
Dec 09 15:18:17 crc kubenswrapper[4716]: I1209 15:18:17.923117 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:18:17 crc kubenswrapper[4716]: I1209 15:18:17.924094 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:18:33 crc kubenswrapper[4716]: I1209 15:18:33.444815 4716 scope.go:117] "RemoveContainer" containerID="d22ce3df36cb7eb1fed6424becbf0321a0f5b3a793aeb67f134d5361c53a4229"
Dec 09 15:18:47 crc kubenswrapper[4716]: I1209 15:18:47.922868 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:18:47 crc kubenswrapper[4716]: I1209 15:18:47.923724 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:19:17 crc kubenswrapper[4716]: I1209 15:19:17.921995 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:19:17 crc kubenswrapper[4716]: I1209 15:19:17.922874 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:19:17 crc kubenswrapper[4716]: I1209 15:19:17.922938 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2"
Dec 09 15:19:17 crc kubenswrapper[4716]: I1209 15:19:17.923549 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7ac024156aad5546738363991b9b7ef9a0dffb92b890f8d72d6755b45adfb383"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 15:19:17 crc kubenswrapper[4716]: I1209 15:19:17.923615 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://7ac024156aad5546738363991b9b7ef9a0dffb92b890f8d72d6755b45adfb383" gracePeriod=600
Dec 09 15:19:18 crc kubenswrapper[4716]: I1209 15:19:18.471742 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="7ac024156aad5546738363991b9b7ef9a0dffb92b890f8d72d6755b45adfb383" exitCode=0
Dec 09 15:19:18 crc kubenswrapper[4716]: I1209 15:19:18.471841 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"7ac024156aad5546738363991b9b7ef9a0dffb92b890f8d72d6755b45adfb383"}
Dec 09 15:19:18 crc kubenswrapper[4716]: I1209 15:19:18.472437 4716 scope.go:117] "RemoveContainer" containerID="4c6e76c9ca1315384ba9656c51a130473b399015c21d538dc1786fee74edb164"
Dec 09 15:19:18 crc kubenswrapper[4716]: I1209 15:19:18.472317 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"926bc2d973bc4613f2c81809b0dc284f4ef49b03a208dfa2c446ce01a7ee38bc"}
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.483407 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"]
Dec 09 15:20:09 crc kubenswrapper[4716]: E1209 15:20:09.484683 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87dab013-ee64-49d5-abd0-6cdaec27805c" containerName="console"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.484705 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="87dab013-ee64-49d5-abd0-6cdaec27805c" containerName="console"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.484847 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="87dab013-ee64-49d5-abd0-6cdaec27805c" containerName="console"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.485901 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.488329 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.501979 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"]
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.642455 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.643228 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.643325 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzvqh\" (UniqueName: \"kubernetes.io/projected/d15321ce-4222-4dea-9cd4-addd3749023e-kube-api-access-rzvqh\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.745185 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzvqh\" (UniqueName: \"kubernetes.io/projected/d15321ce-4222-4dea-9cd4-addd3749023e-kube-api-access-rzvqh\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.745304 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.745341 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.746271 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.746270 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.768271 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzvqh\" (UniqueName: \"kubernetes.io/projected/d15321ce-4222-4dea-9cd4-addd3749023e-kube-api-access-rzvqh\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:09 crc kubenswrapper[4716]: I1209 15:20:09.803484 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:10 crc kubenswrapper[4716]: I1209 15:20:10.240406 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"]
Dec 09 15:20:10 crc kubenswrapper[4716]: I1209 15:20:10.833749 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55" event={"ID":"d15321ce-4222-4dea-9cd4-addd3749023e","Type":"ContainerStarted","Data":"1b8a35bd270ad917ed40ec35d797168cca0e07b1cf4729bab98c17881d2e59b5"}
Dec 09 15:20:10 crc kubenswrapper[4716]: I1209 15:20:10.834202 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55" event={"ID":"d15321ce-4222-4dea-9cd4-addd3749023e","Type":"ContainerStarted","Data":"3b6d9e80f4e5fd8f4663be068242830fe11e07d815c01abf7dfa38e34b7c5cb3"}
Dec 09 15:20:11 crc kubenswrapper[4716]: I1209 15:20:11.842158 4716 generic.go:334] "Generic (PLEG): container finished" podID="d15321ce-4222-4dea-9cd4-addd3749023e" containerID="1b8a35bd270ad917ed40ec35d797168cca0e07b1cf4729bab98c17881d2e59b5" exitCode=0
Dec 09 15:20:11 crc kubenswrapper[4716]: I1209 15:20:11.842222 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55" event={"ID":"d15321ce-4222-4dea-9cd4-addd3749023e","Type":"ContainerDied","Data":"1b8a35bd270ad917ed40ec35d797168cca0e07b1cf4729bab98c17881d2e59b5"}
Dec 09 15:20:11 crc kubenswrapper[4716]: I1209 15:20:11.844659 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 15:20:13 crc kubenswrapper[4716]: I1209 15:20:13.861730 4716 generic.go:334] "Generic (PLEG): container finished" podID="d15321ce-4222-4dea-9cd4-addd3749023e" containerID="e8bcddc49c93e25306fcd6998caffc381a7a047fc38e74c9f4cbe507c3dadfbc" exitCode=0
Dec 09 15:20:13 crc kubenswrapper[4716]: I1209 15:20:13.861787 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55" event={"ID":"d15321ce-4222-4dea-9cd4-addd3749023e","Type":"ContainerDied","Data":"e8bcddc49c93e25306fcd6998caffc381a7a047fc38e74c9f4cbe507c3dadfbc"}
Dec 09 15:20:14 crc kubenswrapper[4716]: I1209 15:20:14.873428 4716 generic.go:334] "Generic (PLEG): container finished" podID="d15321ce-4222-4dea-9cd4-addd3749023e" containerID="27f24678e67fd5ae647916d983e66adc5523cac9b2fefcac14d97974d019ce16" exitCode=0
Dec 09 15:20:14 crc kubenswrapper[4716]: I1209 15:20:14.873567 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55" event={"ID":"d15321ce-4222-4dea-9cd4-addd3749023e","Type":"ContainerDied","Data":"27f24678e67fd5ae647916d983e66adc5523cac9b2fefcac14d97974d019ce16"}
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.184926 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.331615 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-util\") pod \"d15321ce-4222-4dea-9cd4-addd3749023e\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") "
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.331919 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzvqh\" (UniqueName: \"kubernetes.io/projected/d15321ce-4222-4dea-9cd4-addd3749023e-kube-api-access-rzvqh\") pod \"d15321ce-4222-4dea-9cd4-addd3749023e\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") "
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.332820 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-bundle\") pod \"d15321ce-4222-4dea-9cd4-addd3749023e\" (UID: \"d15321ce-4222-4dea-9cd4-addd3749023e\") "
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.334886 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-bundle" (OuterVolumeSpecName: "bundle") pod "d15321ce-4222-4dea-9cd4-addd3749023e" (UID: "d15321ce-4222-4dea-9cd4-addd3749023e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.340230 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d15321ce-4222-4dea-9cd4-addd3749023e-kube-api-access-rzvqh" (OuterVolumeSpecName: "kube-api-access-rzvqh") pod "d15321ce-4222-4dea-9cd4-addd3749023e" (UID: "d15321ce-4222-4dea-9cd4-addd3749023e"). InnerVolumeSpecName "kube-api-access-rzvqh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.342734 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-util" (OuterVolumeSpecName: "util") pod "d15321ce-4222-4dea-9cd4-addd3749023e" (UID: "d15321ce-4222-4dea-9cd4-addd3749023e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.434557 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzvqh\" (UniqueName: \"kubernetes.io/projected/d15321ce-4222-4dea-9cd4-addd3749023e-kube-api-access-rzvqh\") on node \"crc\" DevicePath \"\""
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.434650 4716 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.434662 4716 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d15321ce-4222-4dea-9cd4-addd3749023e-util\") on node \"crc\" DevicePath \"\""
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.888855 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55" event={"ID":"d15321ce-4222-4dea-9cd4-addd3749023e","Type":"ContainerDied","Data":"3b6d9e80f4e5fd8f4663be068242830fe11e07d815c01abf7dfa38e34b7c5cb3"}
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.888916 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b6d9e80f4e5fd8f4663be068242830fe11e07d815c01abf7dfa38e34b7c5cb3"
Dec 09 15:20:16 crc kubenswrapper[4716]: I1209 15:20:16.888992 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.659902 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hcdn4"]
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.660376 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-controller" containerID="cri-o://663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.660960 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="sbdb" containerID="cri-o://26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.661006 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="nbdb" containerID="cri-o://eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.661041 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="northd" containerID="cri-o://0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.661089 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.661136 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-node" containerID="cri-o://14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.661183 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-acl-logging" containerID="cri-o://6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.771428 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller" containerID="cri-o://0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" gracePeriod=30
Dec 09 15:20:20 crc kubenswrapper[4716]: E1209 15:20:20.930529 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod38b4e174_ba72_4a0f_9eed_f2ce970c0afc.slice/crio-a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d953045_e94a_4e04_b78e_bc20b3a8c36c.slice/crio-663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.933705 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/2.log"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.934481 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/1.log"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.934539 4716 generic.go:334] "Generic (PLEG): container finished" podID="38b4e174-ba72-4a0f-9eed-f2ce970c0afc" containerID="a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298" exitCode=2
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.934810 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerDied","Data":"a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298"}
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.934902 4716 scope.go:117] "RemoveContainer" containerID="a2fd13c7fc5ee8080d3513f100512c1ebd48490bbe1f36ff53396b7aa3d9526b"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.935766 4716 scope.go:117] "RemoveContainer" containerID="a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298"
Dec 09 15:20:20 crc kubenswrapper[4716]: E1209 15:20:20.936215 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-rqz4n_openshift-multus(38b4e174-ba72-4a0f-9eed-f2ce970c0afc)\"" pod="openshift-multus/multus-rqz4n" podUID="38b4e174-ba72-4a0f-9eed-f2ce970c0afc"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.946866 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/3.log"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.979296 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovn-acl-logging/0.log"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.980682 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovn-controller/0.log"
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.982951 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" exitCode=143
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.982985 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" exitCode=143
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.983017 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"}
Dec 09 15:20:20 crc kubenswrapper[4716]: I1209 15:20:20.983064 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"}
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.872125 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/3.log"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.875254 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovn-acl-logging/0.log"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.875948 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovn-controller/0.log"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.876455 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.947922 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-dp6p6"]
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948243 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kubecfg-setup"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948263 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kubecfg-setup"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948274 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-node"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948282 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-node"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948292 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-acl-logging"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948298 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-acl-logging"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948310 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948316 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948326 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948333 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948343 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948350 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948362 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948369 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948378 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="sbdb"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948384 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="sbdb"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948391 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="pull"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948397 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="pull"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948407 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="northd"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948414 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="northd"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948423 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="nbdb"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948429 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="nbdb"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948444 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="util"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948450 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="util"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948457 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-ovn-metrics"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948463 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-ovn-metrics"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948472 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="extract"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948479 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="extract"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948588 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="northd"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948600 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948606 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d15321ce-4222-4dea-9cd4-addd3749023e" containerName="extract"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948634 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948642 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-node"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948651 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948658 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="nbdb"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948666 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948673 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovn-acl-logging"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948681 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948689 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="kube-rbac-proxy-ovn-metrics"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948697 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="sbdb"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.948806 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948821 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.948968 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: E1209 15:20:21.949087 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.949095 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerName="ovnkube-controller"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.951298 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.992932 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/2.log"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.995688 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovnkube-controller/3.log"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.999069 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovn-acl-logging/0.log"
Dec 09 15:20:21 crc kubenswrapper[4716]: I1209 15:20:21.999763 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hcdn4_3d953045-e94a-4e04-b78e-bc20b3a8c36c/ovn-controller/0.log"
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000299 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" exitCode=0
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000330 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" exitCode=0
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000339 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" exitCode=0
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000347 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" exitCode=0
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000377 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" exitCode=0
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000390 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" exitCode=0
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000410 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000462 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4"
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000505 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000525 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000538 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000552 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000565 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000583 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000603 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000608 4716 scope.go:117] "RemoveContainer" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000611 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000722 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000734 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000741 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000747 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000755 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000762 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000772 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hcdn4" event={"ID":"3d953045-e94a-4e04-b78e-bc20b3a8c36c","Type":"ContainerDied","Data":"9094de3f2f9c5f9524e307d7f0fe1431a15578d89a0d93698a0754d687fcbdb4"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000785 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000793 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000799 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000805 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000813 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000819 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000825 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000832 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000837 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.000844 4716 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"}
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030334 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-script-lib\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030426 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-openvswitch\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030474 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-systemd-units\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030512 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-kubelet\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030538 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-slash\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030565 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-log-socket\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030652 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54c5g\" (UniqueName: \"kubernetes.io/projected/3d953045-e94a-4e04-b78e-bc20b3a8c36c-kube-api-access-54c5g\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030651 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030680 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-env-overrides\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030718 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-log-socket" (OuterVolumeSpecName: "log-socket") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030673 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-slash" (OuterVolumeSpecName: "host-slash") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030789 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030813 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-ovn-kubernetes\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030877 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-netd\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030902 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-etc-openvswitch\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030917 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030924 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030930 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-ovn\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030974 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.030956 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031005 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-bin\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031058 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-netns\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031083 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-systemd\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") "
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031100 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031109 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031164 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c").
InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031128 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-var-lib-openvswitch\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031237 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-config\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031277 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-node-log\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031364 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031385 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovn-node-metrics-cert\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031420 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\" (UID: \"3d953045-e94a-4e04-b78e-bc20b3a8c36c\") " Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031717 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031759 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-node-log" (OuterVolumeSpecName: "node-log") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031917 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.031997 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032207 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032383 4716 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032404 4716 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-node-log\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032419 4716 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032494 4716 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032507 4716 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032517 4716 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032526 4716 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-slash\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032535 4716 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-log-socket\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032545 4716 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032572 4716 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032584 4716 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032592 4716 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032601 4716 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032613 4716 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032679 4716 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.032690 4716 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.049549 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.050189 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.080324 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d953045-e94a-4e04-b78e-bc20b3a8c36c-kube-api-access-54c5g" (OuterVolumeSpecName: "kube-api-access-54c5g") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "kube-api-access-54c5g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.092399 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "3d953045-e94a-4e04-b78e-bc20b3a8c36c" (UID: "3d953045-e94a-4e04-b78e-bc20b3a8c36c"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.134770 4716 scope.go:117] "RemoveContainer" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.135828 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-var-lib-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136415 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-log-socket\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136466 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3db1e100-d381-488a-bcf5-88416496e5ef-ovn-node-metrics-cert\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136554 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-ovnkube-script-lib\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136610 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-node-log\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136671 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136697 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-ovnkube-config\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 
09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136718 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136761 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-env-overrides\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136777 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-cni-bin\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136793 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-ovn\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136824 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-kubelet\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136855 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-slash\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136870 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-etc-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.136891 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-systemd\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137029 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b7d2\" (UniqueName: \"kubernetes.io/projected/3db1e100-d381-488a-bcf5-88416496e5ef-kube-api-access-5b7d2\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137116 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-cni-netd\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137157 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-systemd-units\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137200 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-run-netns\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137223 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-run-ovn-kubernetes\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137272 4716 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137284 4716 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3d953045-e94a-4e04-b78e-bc20b3a8c36c-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137296 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54c5g\" (UniqueName: \"kubernetes.io/projected/3d953045-e94a-4e04-b78e-bc20b3a8c36c-kube-api-access-54c5g\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.137306 4716 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3d953045-e94a-4e04-b78e-bc20b3a8c36c-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.161814 4716 scope.go:117] "RemoveContainer" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.179843 4716 scope.go:117] "RemoveContainer" containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.196711 4716 scope.go:117] "RemoveContainer" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.214556 4716 scope.go:117] "RemoveContainer" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.231380 
4716 scope.go:117] "RemoveContainer" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239483 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-env-overrides\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239564 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-cni-bin\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239602 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-ovn\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239674 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-kubelet\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239736 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-cni-bin\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239754 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-slash\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239785 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-etc-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239804 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-kubelet\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239809 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-systemd\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: 
I1209 15:20:22.239836 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-ovn\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239848 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b7d2\" (UniqueName: \"kubernetes.io/projected/3db1e100-d381-488a-bcf5-88416496e5ef-kube-api-access-5b7d2\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239871 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-etc-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239880 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-cni-netd\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239908 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-systemd-units\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239920 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-slash\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239943 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-run-netns\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.239970 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-run-ovn-kubernetes\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240000 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-var-lib-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240012 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-cni-netd\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240027 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-log-socket\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240056 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-systemd\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240059 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3db1e100-d381-488a-bcf5-88416496e5ef-ovn-node-metrics-cert\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240108 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-ovnkube-script-lib\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240134 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-node-log\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240169 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240196 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-ovnkube-config\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240190 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-env-overrides\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240219 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240298 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-run-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240421 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240968 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-run-ovn-kubernetes\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.240978 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-systemd-units\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.241012 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-host-run-netns\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.241028 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-log-socket\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.241048 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-node-log\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.241081 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3db1e100-d381-488a-bcf5-88416496e5ef-var-lib-openvswitch\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.241095 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-ovnkube-script-lib\") pod 
\"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.243522 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3db1e100-d381-488a-bcf5-88416496e5ef-ovn-node-metrics-cert\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.244316 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3db1e100-d381-488a-bcf5-88416496e5ef-ovnkube-config\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.249573 4716 scope.go:117] "RemoveContainer" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.262510 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b7d2\" (UniqueName: \"kubernetes.io/projected/3db1e100-d381-488a-bcf5-88416496e5ef-kube-api-access-5b7d2\") pod \"ovnkube-node-dp6p6\" (UID: \"3db1e100-d381-488a-bcf5-88416496e5ef\") " pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.268878 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.279919 4716 scope.go:117] "RemoveContainer" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.346033 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hcdn4"] Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.346588 4716 scope.go:117] "RemoveContainer" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.347650 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": container with ID starting with 0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43 not found: ID does not exist" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.347724 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"} err="failed to get container status \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": rpc error: code = NotFound desc = could not find container \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": container with ID starting with 0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.347771 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.350961 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code 
= NotFound desc = could not find container \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": container with ID starting with 50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69 not found: ID does not exist" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.351013 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"} err="failed to get container status \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": rpc error: code = NotFound desc = could not find container \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": container with ID starting with 50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.351041 4716 scope.go:117] "RemoveContainer" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.355814 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": container with ID starting with 26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d not found: ID does not exist" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.355875 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"} err="failed to get container status \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": rpc error: code = NotFound desc = could not find container \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": container with ID starting with 26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.355918 4716 scope.go:117] "RemoveContainer" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.358721 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hcdn4"] Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.360223 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": container with ID starting with eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1 not found: ID does not exist" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.360270 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"} err="failed to get container status \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": rpc error: code = NotFound desc = could not find container \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": container with ID starting with eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1 not found: ID does not exist" 
Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.360310 4716 scope.go:117] "RemoveContainer" containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.360633 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": container with ID starting with 0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc not found: ID does not exist" containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.360661 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"} err="failed to get container status \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": rpc error: code = NotFound desc = could not find container \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": container with ID starting with 0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.360679 4716 scope.go:117] "RemoveContainer" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.360873 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": container with ID starting with b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09 not found: ID does not exist" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.360895 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"} err="failed to get container status \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": rpc error: code = NotFound desc = could not find container \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": container with ID starting with b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.360909 4716 scope.go:117] "RemoveContainer" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.361102 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": container with ID starting with 14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f not found: ID does not exist" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361124 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"} err="failed to get container status \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": rpc error: code = NotFound desc = could not find container 
\"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": container with ID starting with 14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361137 4716 scope.go:117] "RemoveContainer" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.361321 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": container with ID starting with 6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af not found: ID does not exist" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361343 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"} err="failed to get container status \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": rpc error: code = NotFound desc = could not find container \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": container with ID starting with 6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361361 4716 scope.go:117] "RemoveContainer" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.361534 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": container with ID starting with 663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0 not found: ID does not exist" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361555 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"} err="failed to get container status \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": rpc error: code = NotFound desc = could not find container \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": container with ID starting with 663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361568 4716 scope.go:117] "RemoveContainer" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" Dec 09 15:20:22 crc kubenswrapper[4716]: E1209 15:20:22.361821 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": container with ID starting with 5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598 not found: ID does not exist" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361845 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"} 
err="failed to get container status \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": rpc error: code = NotFound desc = could not find container \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": container with ID starting with 5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.361887 4716 scope.go:117] "RemoveContainer" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362123 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"} err="failed to get container status \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": rpc error: code = NotFound desc = could not find container \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": container with ID starting with 0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362144 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362323 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"} err="failed to get container status \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": rpc error: code = NotFound desc = could not find container \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": container with ID starting with 50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362343 4716 scope.go:117] "RemoveContainer" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362534 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"} err="failed to get container status \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": rpc error: code = NotFound desc = could not find container \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": container with ID starting with 26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362556 4716 scope.go:117] "RemoveContainer" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362784 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"} err="failed to get container status \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": rpc error: code = NotFound desc = could not find container \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": container with ID starting with eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362807 4716 scope.go:117] "RemoveContainer" 
containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.362987 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"} err="failed to get container status \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": rpc error: code = NotFound desc = could not find container \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": container with ID starting with 0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363009 4716 scope.go:117] "RemoveContainer" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363178 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"} err="failed to get container status \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": rpc error: code = NotFound desc = could not find container \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": container with ID starting with b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363197 4716 scope.go:117] "RemoveContainer" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363404 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"} err="failed to get container status \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": rpc error: code = NotFound desc = could not find container \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": container with ID starting with 14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363429 4716 scope.go:117] "RemoveContainer" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363604 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"} err="failed to get container status \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": rpc error: code = NotFound desc = could not find container \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": container with ID starting with 6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363638 4716 scope.go:117] "RemoveContainer" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363836 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"} err="failed to get container status \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": rpc error: code = NotFound desc = could not find 
container \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": container with ID starting with 663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.363862 4716 scope.go:117] "RemoveContainer" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364085 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"} err="failed to get container status \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": rpc error: code = NotFound desc = could not find container \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": container with ID starting with 5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364115 4716 scope.go:117] "RemoveContainer" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364334 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"} err="failed to get container status \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": rpc error: code = NotFound desc = could not find container \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": container with ID starting with 0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364358 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364576 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"} err="failed to get container status \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": rpc error: code = NotFound desc = could not find container \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": container with ID starting with 50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364605 4716 scope.go:117] "RemoveContainer" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364837 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"} err="failed to get container status \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": rpc error: code = NotFound desc = could not find container \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": container with ID starting with 26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.364859 4716 scope.go:117] "RemoveContainer" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.365565 4716 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"} err="failed to get container status \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": rpc error: code = NotFound desc = could not find container \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": container with ID starting with eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.365598 4716 scope.go:117] "RemoveContainer" containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.365806 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"} err="failed to get container status \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": rpc error: code = NotFound desc = could not find container \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": container with ID starting with 0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.365829 4716 scope.go:117] "RemoveContainer" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.366035 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"} err="failed to get container status \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": rpc error: code = NotFound desc = could not find container \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": container with ID starting with b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.366058 4716 scope.go:117] "RemoveContainer" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.366402 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"} err="failed to get container status \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": rpc error: code = NotFound desc = could not find container \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": container with ID starting with 14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.366428 4716 scope.go:117] "RemoveContainer" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.366747 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"} err="failed to get container status \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": rpc error: code = NotFound desc = could not find container \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": container with ID starting with 
6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.366788 4716 scope.go:117] "RemoveContainer" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.367350 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"} err="failed to get container status \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": rpc error: code = NotFound desc = could not find container \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": container with ID starting with 663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.367447 4716 scope.go:117] "RemoveContainer" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.367835 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"} err="failed to get container status \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": rpc error: code = NotFound desc = could not find container \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": container with ID starting with 5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.367862 4716 scope.go:117] "RemoveContainer" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.368186 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"} err="failed to get container status \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": rpc error: code = NotFound desc = could not find container \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": container with ID starting with 0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.368222 4716 scope.go:117] "RemoveContainer" containerID="50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.368564 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69"} err="failed to get container status \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": rpc error: code = NotFound desc = could not find container \"50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69\": container with ID starting with 50bbadfb1351cf6817d88c5426c0fae08a11ad926ba40a3a6e067988f2aa3c69 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.368598 4716 scope.go:117] "RemoveContainer" containerID="26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.368957 4716 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d"} err="failed to get container status \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": rpc error: code = NotFound desc = could not find container \"26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d\": container with ID starting with 26861ba64c4e3e94beaaeec5f5c8b191f0366f70da1c86019a6774c482de8c4d not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.368985 4716 scope.go:117] "RemoveContainer" containerID="eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.369406 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1"} err="failed to get container status \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": rpc error: code = NotFound desc = could not find container \"eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1\": container with ID starting with eb976c0ddad924945129152242e752ba0a68171d3a623416f90e95db5c26e9b1 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.369477 4716 scope.go:117] "RemoveContainer" containerID="0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.369820 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc"} err="failed to get container status \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": rpc error: code = NotFound desc = could not find container \"0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc\": container with ID starting with 0bcb29d8233138e1c2fdca81c70c950e534e1eb4e265e0b71c6e6ca74b8db1dc not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.369845 4716 scope.go:117] "RemoveContainer" containerID="b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.370309 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09"} err="failed to get container status \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": rpc error: code = NotFound desc = could not find container \"b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09\": container with ID starting with b379004714918e9c67ef8a941245b42810551b46c49e34e472fdd4746c2e7e09 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.370349 4716 scope.go:117] "RemoveContainer" containerID="14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.371851 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f"} err="failed to get container status \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": rpc error: code = NotFound desc = could not find container \"14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f\": container with ID starting with 14abdf3286d4efc5419c1f4c0879db845142ece7a2111a1b210935aba018093f not found: ID does not exist" Dec 
09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.371878 4716 scope.go:117] "RemoveContainer" containerID="6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.372179 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af"} err="failed to get container status \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": rpc error: code = NotFound desc = could not find container \"6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af\": container with ID starting with 6ec976f901f8915e8fad65503fa62459642b4e7c6ddb02cbd77695e052f606af not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.372209 4716 scope.go:117] "RemoveContainer" containerID="663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.372519 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0"} err="failed to get container status \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": rpc error: code = NotFound desc = could not find container \"663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0\": container with ID starting with 663a4de82ccaf06b8bc283c82aea302445417740d0a46e64fc9bf7bb8d5248c0 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.372554 4716 scope.go:117] "RemoveContainer" containerID="5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.372815 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598"} err="failed to get container status \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": rpc error: code = NotFound desc = could not find container \"5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598\": container with ID starting with 5987d6973999c8f4209ab0e1387a4aa77253fe5a2f16387cd77d1d96e5830598 not found: ID does not exist" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.372833 4716 scope.go:117] "RemoveContainer" containerID="0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43" Dec 09 15:20:22 crc kubenswrapper[4716]: I1209 15:20:22.373489 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43"} err="failed to get container status \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": rpc error: code = NotFound desc = could not find container \"0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43\": container with ID starting with 0b589e5aa8dc33433e843fe2d9be871a2c60c060d3dd99b4b8682b4b377c6b43 not found: ID does not exist" Dec 09 15:20:23 crc kubenswrapper[4716]: I1209 15:20:23.008851 4716 generic.go:334] "Generic (PLEG): container finished" podID="3db1e100-d381-488a-bcf5-88416496e5ef" containerID="2f05a3314a9748fe0861a60f5e26df73d423b00da17a0d7ad009e91a8f71822b" exitCode=0 Dec 09 15:20:23 crc kubenswrapper[4716]: I1209 15:20:23.008943 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" 
event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerDied","Data":"2f05a3314a9748fe0861a60f5e26df73d423b00da17a0d7ad009e91a8f71822b"} Dec 09 15:20:23 crc kubenswrapper[4716]: I1209 15:20:23.009364 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"572a7ddeb9723e6aa140e93c89b6ec7d29787bf987e7a78c4cc04f987d6ba198"} Dec 09 15:20:23 crc kubenswrapper[4716]: I1209 15:20:23.223615 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d953045-e94a-4e04-b78e-bc20b3a8c36c" path="/var/lib/kubelet/pods/3d953045-e94a-4e04-b78e-bc20b3a8c36c/volumes" Dec 09 15:20:24 crc kubenswrapper[4716]: I1209 15:20:24.280233 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"248ba9577ebb0bbb57d9f76c312b3716f101d1b10aa5a73166087925e1f5235b"} Dec 09 15:20:24 crc kubenswrapper[4716]: I1209 15:20:24.281764 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"21bb92b61d04bed11d4eed99d77d8c05ffd45919e73ad01b8ee6f5674e7f9143"} Dec 09 15:20:24 crc kubenswrapper[4716]: I1209 15:20:24.281868 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"3787bd5e046ad50639c18d9c8cd4f276b4d69e4f314c68987fc2363fac888d11"} Dec 09 15:20:24 crc kubenswrapper[4716]: I1209 15:20:24.281931 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"687b7de5f13398000fd9fa033717b8c22733052c741445d24a5ee0ecb7037563"} Dec 09 15:20:24 crc kubenswrapper[4716]: I1209 15:20:24.281994 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"482207ce05005b8ab64dbd4b91be78dabb692d0b96ed7fce651e057efea078cf"} Dec 09 15:20:25 crc kubenswrapper[4716]: I1209 15:20:25.296164 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"cacd16bceede747a35ff0e21bcae2a53ce1306b9683082a31c7a1e65d8253e7c"} Dec 09 15:20:27 crc kubenswrapper[4716]: I1209 15:20:27.321739 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"fa39d2c0319db67f0fc62143522855461976f3612327eb0f9f24f02e4a7d0131"} Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.635634 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l"] Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.638125 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.644864 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-2j5f5" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.645469 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.645565 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.763750 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwx9p\" (UniqueName: \"kubernetes.io/projected/2631330f-8710-4364-a3ea-6b0455e189f5-kube-api-access-bwx9p\") pod \"obo-prometheus-operator-668cf9dfbb-jft8l\" (UID: \"2631330f-8710-4364-a3ea-6b0455e189f5\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.802954 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" event={"ID":"3db1e100-d381-488a-bcf5-88416496e5ef","Type":"ContainerStarted","Data":"72aafa90a502f6672b95303f01174ba06f37a8ea924ea04c37352aef45e2da5e"} Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.803481 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.803540 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.833172 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"] Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.834552 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.840837 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.841230 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-xbmv2" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.858386 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.867327 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"] Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.867837 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwx9p\" (UniqueName: \"kubernetes.io/projected/2631330f-8710-4364-a3ea-6b0455e189f5-kube-api-access-bwx9p\") pod \"obo-prometheus-operator-668cf9dfbb-jft8l\" (UID: \"2631330f-8710-4364-a3ea-6b0455e189f5\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.867965 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a85a65c4-faa1-47a5-849c-0715edb9e29d-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz\" (UID: \"a85a65c4-faa1-47a5-849c-0715edb9e29d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.868053 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a85a65c4-faa1-47a5-849c-0715edb9e29d-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz\" (UID: \"a85a65c4-faa1-47a5-849c-0715edb9e29d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.871984 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.896717 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" podStartSLOduration=8.896675339 podStartE2EDuration="8.896675339s" podCreationTimestamp="2025-12-09 15:20:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:20:29.889310474 +0000 UTC m=+717.044054462" watchObservedRunningTime="2025-12-09 15:20:29.896675339 +0000 UTC m=+717.051419337" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.925895 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwx9p\" (UniqueName: \"kubernetes.io/projected/2631330f-8710-4364-a3ea-6b0455e189f5-kube-api-access-bwx9p\") pod \"obo-prometheus-operator-668cf9dfbb-jft8l\" (UID: \"2631330f-8710-4364-a3ea-6b0455e189f5\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.964331 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.970110 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a85a65c4-faa1-47a5-849c-0715edb9e29d-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz\" (UID: \"a85a65c4-faa1-47a5-849c-0715edb9e29d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.970236 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a85a65c4-faa1-47a5-849c-0715edb9e29d-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz\" (UID: \"a85a65c4-faa1-47a5-849c-0715edb9e29d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.971127 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ecae64e4-38fd-4c35-918b-fab84d70ad07-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp\" (UID: \"ecae64e4-38fd-4c35-918b-fab84d70ad07\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.971204 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ecae64e4-38fd-4c35-918b-fab84d70ad07-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp\" (UID: \"ecae64e4-38fd-4c35-918b-fab84d70ad07\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.978537 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a85a65c4-faa1-47a5-849c-0715edb9e29d-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz\" (UID: \"a85a65c4-faa1-47a5-849c-0715edb9e29d\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:29 crc kubenswrapper[4716]: I1209 15:20:29.980872 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a85a65c4-faa1-47a5-849c-0715edb9e29d-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz\" (UID: \"a85a65c4-faa1-47a5-849c-0715edb9e29d\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.073237 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ecae64e4-38fd-4c35-918b-fab84d70ad07-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp\" (UID: \"ecae64e4-38fd-4c35-918b-fab84d70ad07\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.073995 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ecae64e4-38fd-4c35-918b-fab84d70ad07-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp\" (UID: \"ecae64e4-38fd-4c35-918b-fab84d70ad07\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.081439 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ecae64e4-38fd-4c35-918b-fab84d70ad07-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp\" (UID: \"ecae64e4-38fd-4c35-918b-fab84d70ad07\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.081468 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ecae64e4-38fd-4c35-918b-fab84d70ad07-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp\" (UID: \"ecae64e4-38fd-4c35-918b-fab84d70ad07\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.611309 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.611828 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.638919 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(166395ede2586855d33f876efbab292447e09c6e1e8bb1bda13c2985a18fe9a7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.639043 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(166395ede2586855d33f876efbab292447e09c6e1e8bb1bda13c2985a18fe9a7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.639073 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(166395ede2586855d33f876efbab292447e09c6e1e8bb1bda13c2985a18fe9a7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.639126 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators(2631330f-8710-4364-a3ea-6b0455e189f5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators(2631330f-8710-4364-a3ea-6b0455e189f5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(166395ede2586855d33f876efbab292447e09c6e1e8bb1bda13c2985a18fe9a7): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" podUID="2631330f-8710-4364-a3ea-6b0455e189f5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.643035 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-hlfs7"]
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.644421 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.662878 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.663190 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-7mslg"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.718041 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95vpq\" (UniqueName: \"kubernetes.io/projected/ba73b247-86e8-4b3c-977f-a4bffb4f44e5-kube-api-access-95vpq\") pod \"observability-operator-d8bb48f5d-hlfs7\" (UID: \"ba73b247-86e8-4b3c-977f-a4bffb4f44e5\") " pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.718177 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba73b247-86e8-4b3c-977f-a4bffb4f44e5-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-hlfs7\" (UID: \"ba73b247-86e8-4b3c-977f-a4bffb4f44e5\") " pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.733833 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(dbd1b15372b91218ee82da1a98d6b85ddc99ba0ed650825346effb17d069e5c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.733935 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(dbd1b15372b91218ee82da1a98d6b85ddc99ba0ed650825346effb17d069e5c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.733962 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(dbd1b15372b91218ee82da1a98d6b85ddc99ba0ed650825346effb17d069e5c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.734016 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators(ecae64e4-38fd-4c35-918b-fab84d70ad07)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators(ecae64e4-38fd-4c35-918b-fab84d70ad07)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(dbd1b15372b91218ee82da1a98d6b85ddc99ba0ed650825346effb17d069e5c6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" podUID="ecae64e4-38fd-4c35-918b-fab84d70ad07"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.763993 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-jxhr5"]
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.777791 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.779655 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-psm8n"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.785878 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(b134dac5ba575fcd9aeb9ffe52f84a1e546ed09dca5d0b91f4f59b5820cfc692): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.785944 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(b134dac5ba575fcd9aeb9ffe52f84a1e546ed09dca5d0b91f4f59b5820cfc692): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.785972 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(b134dac5ba575fcd9aeb9ffe52f84a1e546ed09dca5d0b91f4f59b5820cfc692): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"
Dec 09 15:20:30 crc kubenswrapper[4716]: E1209 15:20:30.786027 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators(a85a65c4-faa1-47a5-849c-0715edb9e29d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators(a85a65c4-faa1-47a5-849c-0715edb9e29d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(b134dac5ba575fcd9aeb9ffe52f84a1e546ed09dca5d0b91f4f59b5820cfc692): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" podUID="a85a65c4-faa1-47a5-849c-0715edb9e29d"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.821129 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.824772 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba73b247-86e8-4b3c-977f-a4bffb4f44e5-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-hlfs7\" (UID: \"ba73b247-86e8-4b3c-977f-a4bffb4f44e5\") " pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.824854 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmrxh\" (UniqueName: \"kubernetes.io/projected/51530d86-c01a-4a79-909e-f653c5582af7-kube-api-access-hmrxh\") pod \"perses-operator-5446b9c989-jxhr5\" (UID: \"51530d86-c01a-4a79-909e-f653c5582af7\") " pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.824884 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95vpq\" (UniqueName: \"kubernetes.io/projected/ba73b247-86e8-4b3c-977f-a4bffb4f44e5-kube-api-access-95vpq\") pod \"observability-operator-d8bb48f5d-hlfs7\" (UID: \"ba73b247-86e8-4b3c-977f-a4bffb4f44e5\") " pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.824989 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/51530d86-c01a-4a79-909e-f653c5582af7-openshift-service-ca\") pod \"perses-operator-5446b9c989-jxhr5\" (UID: \"51530d86-c01a-4a79-909e-f653c5582af7\") " pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.831250 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/ba73b247-86e8-4b3c-977f-a4bffb4f44e5-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-hlfs7\" (UID: \"ba73b247-86e8-4b3c-977f-a4bffb4f44e5\") " pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.853666 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95vpq\" (UniqueName: \"kubernetes.io/projected/ba73b247-86e8-4b3c-977f-a4bffb4f44e5-kube-api-access-95vpq\") pod \"observability-operator-d8bb48f5d-hlfs7\" (UID: \"ba73b247-86e8-4b3c-977f-a4bffb4f44e5\") " pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.867651 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.926902 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/51530d86-c01a-4a79-909e-f653c5582af7-openshift-service-ca\") pod \"perses-operator-5446b9c989-jxhr5\" (UID: \"51530d86-c01a-4a79-909e-f653c5582af7\") " pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.927041 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmrxh\" (UniqueName: \"kubernetes.io/projected/51530d86-c01a-4a79-909e-f653c5582af7-kube-api-access-hmrxh\") pod \"perses-operator-5446b9c989-jxhr5\" (UID: \"51530d86-c01a-4a79-909e-f653c5582af7\") " pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.927991 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/51530d86-c01a-4a79-909e-f653c5582af7-openshift-service-ca\") pod \"perses-operator-5446b9c989-jxhr5\" (UID: \"51530d86-c01a-4a79-909e-f653c5582af7\") " pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:30 crc kubenswrapper[4716]: I1209 15:20:30.953594 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmrxh\" (UniqueName: \"kubernetes.io/projected/51530d86-c01a-4a79-909e-f653c5582af7-kube-api-access-hmrxh\") pod \"perses-operator-5446b9c989-jxhr5\" (UID: \"51530d86-c01a-4a79-909e-f653c5582af7\") " pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:31 crc kubenswrapper[4716]: I1209 15:20:31.118102 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:31 crc kubenswrapper[4716]: I1209 15:20:31.128884 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.206101 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(60919a5fc1c2abfd49fa6977b40d87919f3e9ec543c182b77e65d3661322023b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.206646 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(60919a5fc1c2abfd49fa6977b40d87919f3e9ec543c182b77e65d3661322023b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.206732 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(60919a5fc1c2abfd49fa6977b40d87919f3e9ec543c182b77e65d3661322023b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.206845 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-jxhr5_openshift-operators(51530d86-c01a-4a79-909e-f653c5582af7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-jxhr5_openshift-operators(51530d86-c01a-4a79-909e-f653c5582af7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(60919a5fc1c2abfd49fa6977b40d87919f3e9ec543c182b77e65d3661322023b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" podUID="51530d86-c01a-4a79-909e-f653c5582af7"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.214505 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(dde40e452fb453149c8a934f6f7b54373bc6db73212e742d2bd93fea52de76f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.214640 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(dde40e452fb453149c8a934f6f7b54373bc6db73212e742d2bd93fea52de76f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.214681 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(dde40e452fb453149c8a934f6f7b54373bc6db73212e742d2bd93fea52de76f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7"
Dec 09 15:20:31 crc kubenswrapper[4716]: E1209 15:20:31.214751 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-hlfs7_openshift-operators(ba73b247-86e8-4b3c-977f-a4bffb4f44e5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-hlfs7_openshift-operators(ba73b247-86e8-4b3c-977f-a4bffb4f44e5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(dde40e452fb453149c8a934f6f7b54373bc6db73212e742d2bd93fea52de76f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" podUID="ba73b247-86e8-4b3c-977f-a4bffb4f44e5"
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.856047 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"]
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.856275 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.856984 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.877383 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"]
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.877533 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.878079 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.975730 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-jxhr5"]
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.975919 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:32 crc kubenswrapper[4716]: I1209 15:20:32.976589 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5"
Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.021838 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(3bd9e9fc17b77724279542875b2422d1abf839b0106deb9169e85134f8d0d596): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.021945 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(3bd9e9fc17b77724279542875b2422d1abf839b0106deb9169e85134f8d0d596): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.021982 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(3bd9e9fc17b77724279542875b2422d1abf839b0106deb9169e85134f8d0d596): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.022046 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators(a85a65c4-faa1-47a5-849c-0715edb9e29d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators(a85a65c4-faa1-47a5-849c-0715edb9e29d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(3bd9e9fc17b77724279542875b2422d1abf839b0106deb9169e85134f8d0d596): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" podUID="a85a65c4-faa1-47a5-849c-0715edb9e29d" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.048782 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(e6990b2763da103da1155eb629438a05e2c6391e6d8eb9aec628b503541050f3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.048874 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(e6990b2763da103da1155eb629438a05e2c6391e6d8eb9aec628b503541050f3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.048904 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(e6990b2763da103da1155eb629438a05e2c6391e6d8eb9aec628b503541050f3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.048959 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators(ecae64e4-38fd-4c35-918b-fab84d70ad07)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators(ecae64e4-38fd-4c35-918b-fab84d70ad07)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(e6990b2763da103da1155eb629438a05e2c6391e6d8eb9aec628b503541050f3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" podUID="ecae64e4-38fd-4c35-918b-fab84d70ad07" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.108661 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(b887aa0813d13ce106b9377d4dee5541347ca93864f57ef52d6d93b701e1ce4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.108730 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(b887aa0813d13ce106b9377d4dee5541347ca93864f57ef52d6d93b701e1ce4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.108761 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(b887aa0813d13ce106b9377d4dee5541347ca93864f57ef52d6d93b701e1ce4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.108804 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-jxhr5_openshift-operators(51530d86-c01a-4a79-909e-f653c5582af7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-jxhr5_openshift-operators(51530d86-c01a-4a79-909e-f653c5582af7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(b887aa0813d13ce106b9377d4dee5541347ca93864f57ef52d6d93b701e1ce4a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" podUID="51530d86-c01a-4a79-909e-f653c5582af7" Dec 09 15:20:33 crc kubenswrapper[4716]: I1209 15:20:33.498792 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-hlfs7"] Dec 09 15:20:33 crc kubenswrapper[4716]: I1209 15:20:33.499263 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:33 crc kubenswrapper[4716]: I1209 15:20:33.499766 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.577538 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(b8c163cc9759b5198987b5bce3d08b5a8129ec3ceab901ad562bc1af8033e55b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.577663 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(b8c163cc9759b5198987b5bce3d08b5a8129ec3ceab901ad562bc1af8033e55b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.577693 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(b8c163cc9759b5198987b5bce3d08b5a8129ec3ceab901ad562bc1af8033e55b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.577757 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-hlfs7_openshift-operators(ba73b247-86e8-4b3c-977f-a4bffb4f44e5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-hlfs7_openshift-operators(ba73b247-86e8-4b3c-977f-a4bffb4f44e5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(b8c163cc9759b5198987b5bce3d08b5a8129ec3ceab901ad562bc1af8033e55b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" podUID="ba73b247-86e8-4b3c-977f-a4bffb4f44e5" Dec 09 15:20:33 crc kubenswrapper[4716]: I1209 15:20:33.624958 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l"] Dec 09 15:20:33 crc kubenswrapper[4716]: I1209 15:20:33.625167 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:33 crc kubenswrapper[4716]: I1209 15:20:33.625912 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.790797 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(b4f9b9ba177463ce17f7af0f45246f7865880bc3f2e4c7c8c4b7a1b2d245dbcb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.790883 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(b4f9b9ba177463ce17f7af0f45246f7865880bc3f2e4c7c8c4b7a1b2d245dbcb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.790918 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(b4f9b9ba177463ce17f7af0f45246f7865880bc3f2e4c7c8c4b7a1b2d245dbcb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:33 crc kubenswrapper[4716]: E1209 15:20:33.790986 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators(2631330f-8710-4364-a3ea-6b0455e189f5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators(2631330f-8710-4364-a3ea-6b0455e189f5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(b4f9b9ba177463ce17f7af0f45246f7865880bc3f2e4c7c8c4b7a1b2d245dbcb): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" podUID="2631330f-8710-4364-a3ea-6b0455e189f5" Dec 09 15:20:36 crc kubenswrapper[4716]: I1209 15:20:36.213468 4716 scope.go:117] "RemoveContainer" containerID="a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298" Dec 09 15:20:36 crc kubenswrapper[4716]: E1209 15:20:36.214252 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-rqz4n_openshift-multus(38b4e174-ba72-4a0f-9eed-f2ce970c0afc)\"" pod="openshift-multus/multus-rqz4n" podUID="38b4e174-ba72-4a0f-9eed-f2ce970c0afc" Dec 09 15:20:44 crc kubenswrapper[4716]: I1209 15:20:44.213051 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:44 crc kubenswrapper[4716]: I1209 15:20:44.214179 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:44 crc kubenswrapper[4716]: E1209 15:20:44.253683 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(eb6125e2fd18ea9bef15e6c2e00fc799ecaf5a23cacde64f14cb69417aaa5728): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:44 crc kubenswrapper[4716]: E1209 15:20:44.254209 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(eb6125e2fd18ea9bef15e6c2e00fc799ecaf5a23cacde64f14cb69417aaa5728): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:44 crc kubenswrapper[4716]: E1209 15:20:44.254236 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(eb6125e2fd18ea9bef15e6c2e00fc799ecaf5a23cacde64f14cb69417aaa5728): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:44 crc kubenswrapper[4716]: E1209 15:20:44.254298 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-jxhr5_openshift-operators(51530d86-c01a-4a79-909e-f653c5582af7)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-jxhr5_openshift-operators(51530d86-c01a-4a79-909e-f653c5582af7)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-jxhr5_openshift-operators_51530d86-c01a-4a79-909e-f653c5582af7_0(eb6125e2fd18ea9bef15e6c2e00fc799ecaf5a23cacde64f14cb69417aaa5728): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" podUID="51530d86-c01a-4a79-909e-f653c5582af7" Dec 09 15:20:45 crc kubenswrapper[4716]: I1209 15:20:45.213334 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:45 crc kubenswrapper[4716]: I1209 15:20:45.213452 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:45 crc kubenswrapper[4716]: I1209 15:20:45.214642 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:45 crc kubenswrapper[4716]: I1209 15:20:45.215075 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.265897 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(5b9d287d1ce47d9a42fb3b7fd83c4525ef98825cd79ff8e19a62619e1b12e650): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.266007 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(5b9d287d1ce47d9a42fb3b7fd83c4525ef98825cd79ff8e19a62619e1b12e650): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.266034 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(5b9d287d1ce47d9a42fb3b7fd83c4525ef98825cd79ff8e19a62619e1b12e650): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.266109 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators(ecae64e4-38fd-4c35-918b-fab84d70ad07)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators(ecae64e4-38fd-4c35-918b-fab84d70ad07)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_openshift-operators_ecae64e4-38fd-4c35-918b-fab84d70ad07_0(5b9d287d1ce47d9a42fb3b7fd83c4525ef98825cd79ff8e19a62619e1b12e650): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" podUID="ecae64e4-38fd-4c35-918b-fab84d70ad07" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.278892 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(d0dcd10b01bfd8637100be7b2da3b64867daad6e909286a419628b3eb0b0ae34): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.278991 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(d0dcd10b01bfd8637100be7b2da3b64867daad6e909286a419628b3eb0b0ae34): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.279023 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(d0dcd10b01bfd8637100be7b2da3b64867daad6e909286a419628b3eb0b0ae34): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:45 crc kubenswrapper[4716]: E1209 15:20:45.279082 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-hlfs7_openshift-operators(ba73b247-86e8-4b3c-977f-a4bffb4f44e5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-hlfs7_openshift-operators(ba73b247-86e8-4b3c-977f-a4bffb4f44e5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-hlfs7_openshift-operators_ba73b247-86e8-4b3c-977f-a4bffb4f44e5_0(d0dcd10b01bfd8637100be7b2da3b64867daad6e909286a419628b3eb0b0ae34): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" podUID="ba73b247-86e8-4b3c-977f-a4bffb4f44e5" Dec 09 15:20:47 crc kubenswrapper[4716]: I1209 15:20:47.213373 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:47 crc kubenswrapper[4716]: I1209 15:20:47.213373 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:47 crc kubenswrapper[4716]: I1209 15:20:47.214792 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:47 crc kubenswrapper[4716]: I1209 15:20:47.214799 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.351962 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(eccbf93fb3b56ac87a46419d166c0e53361ce1b83545f16a59aa6cf726258428): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.352052 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(eccbf93fb3b56ac87a46419d166c0e53361ce1b83545f16a59aa6cf726258428): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.352090 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(eccbf93fb3b56ac87a46419d166c0e53361ce1b83545f16a59aa6cf726258428): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.352156 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators(a85a65c4-faa1-47a5-849c-0715edb9e29d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators(a85a65c4-faa1-47a5-849c-0715edb9e29d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_openshift-operators_a85a65c4-faa1-47a5-849c-0715edb9e29d_0(eccbf93fb3b56ac87a46419d166c0e53361ce1b83545f16a59aa6cf726258428): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" podUID="a85a65c4-faa1-47a5-849c-0715edb9e29d" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.365946 4716 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(3c5f7ef7dd75b25b5e321819931ffaac9619f430e1b017c6684759b02e0d7a20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.366053 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(3c5f7ef7dd75b25b5e321819931ffaac9619f430e1b017c6684759b02e0d7a20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.366084 4716 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(3c5f7ef7dd75b25b5e321819931ffaac9619f430e1b017c6684759b02e0d7a20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:20:47 crc kubenswrapper[4716]: E1209 15:20:47.366141 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators(2631330f-8710-4364-a3ea-6b0455e189f5)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators(2631330f-8710-4364-a3ea-6b0455e189f5)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jft8l_openshift-operators_2631330f-8710-4364-a3ea-6b0455e189f5_0(3c5f7ef7dd75b25b5e321819931ffaac9619f430e1b017c6684759b02e0d7a20): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" podUID="2631330f-8710-4364-a3ea-6b0455e189f5" Dec 09 15:20:48 crc kubenswrapper[4716]: I1209 15:20:48.214152 4716 scope.go:117] "RemoveContainer" containerID="a758ac06c7e4b0f191b12fd2d9cd2ef906c39fedcb80562792f0ef219b1f9298" Dec 09 15:20:49 crc kubenswrapper[4716]: I1209 15:20:49.215473 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rqz4n_38b4e174-ba72-4a0f-9eed-f2ce970c0afc/kube-multus/2.log" Dec 09 15:20:49 crc kubenswrapper[4716]: I1209 15:20:49.222769 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rqz4n" event={"ID":"38b4e174-ba72-4a0f-9eed-f2ce970c0afc","Type":"ContainerStarted","Data":"c485cb998d07cef207871a24ed6922137871561165b1aa302a73d90b31973cfe"} Dec 09 15:20:52 crc kubenswrapper[4716]: I1209 15:20:52.295000 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-dp6p6" Dec 09 15:20:57 crc kubenswrapper[4716]: I1209 15:20:57.212752 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:57 crc kubenswrapper[4716]: I1209 15:20:57.214186 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:20:57 crc kubenswrapper[4716]: I1209 15:20:57.526056 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-hlfs7"] Dec 09 15:20:58 crc kubenswrapper[4716]: I1209 15:20:58.360779 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" event={"ID":"ba73b247-86e8-4b3c-977f-a4bffb4f44e5","Type":"ContainerStarted","Data":"e37afdff84e5468d996ed3572d457dc0025a4c110f417160b833f522a342b7c3"} Dec 09 15:20:59 crc kubenswrapper[4716]: I1209 15:20:59.213315 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:59 crc kubenswrapper[4716]: I1209 15:20:59.214406 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:20:59 crc kubenswrapper[4716]: I1209 15:20:59.452848 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-jxhr5"] Dec 09 15:21:00 crc kubenswrapper[4716]: I1209 15:21:00.213455 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:21:00 crc kubenswrapper[4716]: I1209 15:21:00.215580 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" Dec 09 15:21:00 crc kubenswrapper[4716]: I1209 15:21:00.390633 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" event={"ID":"51530d86-c01a-4a79-909e-f653c5582af7","Type":"ContainerStarted","Data":"6895cba4bb31e6dd6bf6c197ddc899c5865f5af326146721d20eee8fe6630560"} Dec 09 15:21:00 crc kubenswrapper[4716]: I1209 15:21:00.619487 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp"] Dec 09 15:21:00 crc kubenswrapper[4716]: W1209 15:21:00.660296 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podecae64e4_38fd_4c35_918b_fab84d70ad07.slice/crio-cb888261747428b6ae696fb68681832d01310f731d4ca0ea3ff354c80258b6a2 WatchSource:0}: Error finding container cb888261747428b6ae696fb68681832d01310f731d4ca0ea3ff354c80258b6a2: Status 404 returned error can't find the container with id cb888261747428b6ae696fb68681832d01310f731d4ca0ea3ff354c80258b6a2 Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.213792 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.214531 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.214787 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.215712 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.411451 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" event={"ID":"ecae64e4-38fd-4c35-918b-fab84d70ad07","Type":"ContainerStarted","Data":"cb888261747428b6ae696fb68681832d01310f731d4ca0ea3ff354c80258b6a2"} Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.946245 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l"] Dec 09 15:21:01 crc kubenswrapper[4716]: I1209 15:21:01.995885 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz"] Dec 09 15:21:02 crc kubenswrapper[4716]: W1209 15:21:02.008776 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda85a65c4_faa1_47a5_849c_0715edb9e29d.slice/crio-f7d0e43ec4ac6fa4725fbd8f10a708d709ca818195d55f77ab2839a659853db6 WatchSource:0}: Error finding container f7d0e43ec4ac6fa4725fbd8f10a708d709ca818195d55f77ab2839a659853db6: Status 404 returned error can't find the container with id f7d0e43ec4ac6fa4725fbd8f10a708d709ca818195d55f77ab2839a659853db6 Dec 09 15:21:02 crc kubenswrapper[4716]: I1209 15:21:02.421468 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" event={"ID":"a85a65c4-faa1-47a5-849c-0715edb9e29d","Type":"ContainerStarted","Data":"f7d0e43ec4ac6fa4725fbd8f10a708d709ca818195d55f77ab2839a659853db6"} Dec 09 15:21:02 crc kubenswrapper[4716]: I1209 15:21:02.423661 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" event={"ID":"2631330f-8710-4364-a3ea-6b0455e189f5","Type":"ContainerStarted","Data":"840c82fe68023bb269b53df28803b3217daa7be8b4c4d79c8bf254524db9086e"} Dec 09 15:21:05 crc kubenswrapper[4716]: I1209 15:21:04.992570 4716 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.320831 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" event={"ID":"ba73b247-86e8-4b3c-977f-a4bffb4f44e5","Type":"ContainerStarted","Data":"25dd1e6639df9d4de26da5b374c14d913bd7a4389c0955b97ab1c5bf1ac831b3"} Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.322821 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.325507 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" event={"ID":"a85a65c4-faa1-47a5-849c-0715edb9e29d","Type":"ContainerStarted","Data":"3a51efa14663c8550590a5b65bb702ca376b97f6df505f508a410e88393d1ad5"} Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.328304 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" event={"ID":"51530d86-c01a-4a79-909e-f653c5582af7","Type":"ContainerStarted","Data":"a8b24a41c5d5d11f1e0ae06ff1054114b2a779249126008b371604391b17979a"} Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.328414 4716 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.330061 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" event={"ID":"ecae64e4-38fd-4c35-918b-fab84d70ad07","Type":"ContainerStarted","Data":"60ef14e3c867e027ae1491a9623797bf8af51354a92a0e145f93342ddbebfb0f"} Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.335889 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" event={"ID":"2631330f-8710-4364-a3ea-6b0455e189f5","Type":"ContainerStarted","Data":"e91ccfabd6714b1655de00f92196d1e24d32587e54fd33cd4a5149344730a371"} Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.362117 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" podStartSLOduration=30.479402653 podStartE2EDuration="46.362045346s" podCreationTimestamp="2025-12-09 15:20:29 +0000 UTC" firstStartedPulling="2025-12-09 15:20:57.535153806 +0000 UTC m=+744.689897794" lastFinishedPulling="2025-12-09 15:21:13.417796499 +0000 UTC m=+760.572540487" observedRunningTime="2025-12-09 15:21:15.348238244 +0000 UTC m=+762.502982252" watchObservedRunningTime="2025-12-09 15:21:15.362045346 +0000 UTC m=+762.516789344" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.377976 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jft8l" podStartSLOduration=34.873746602 podStartE2EDuration="46.377924047s" podCreationTimestamp="2025-12-09 15:20:29 +0000 UTC" firstStartedPulling="2025-12-09 15:21:01.95474034 +0000 UTC m=+749.109484328" lastFinishedPulling="2025-12-09 15:21:13.458917775 +0000 UTC m=+760.613661773" observedRunningTime="2025-12-09 15:21:15.373969342 +0000 UTC m=+762.528713340" watchObservedRunningTime="2025-12-09 15:21:15.377924047 +0000 UTC m=+762.532668035" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.404505 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz" podStartSLOduration=34.955012526 podStartE2EDuration="46.404476939s" podCreationTimestamp="2025-12-09 15:20:29 +0000 UTC" firstStartedPulling="2025-12-09 15:21:02.011949614 +0000 UTC m=+749.166693602" lastFinishedPulling="2025-12-09 15:21:13.461414017 +0000 UTC m=+760.616158015" observedRunningTime="2025-12-09 15:21:15.40035957 +0000 UTC m=+762.555103558" watchObservedRunningTime="2025-12-09 15:21:15.404476939 +0000 UTC m=+762.559220927" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.442917 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp" podStartSLOduration=33.650657947 podStartE2EDuration="46.442891896s" podCreationTimestamp="2025-12-09 15:20:29 +0000 UTC" firstStartedPulling="2025-12-09 15:21:00.666868021 +0000 UTC m=+747.821612009" lastFinishedPulling="2025-12-09 15:21:13.45910197 +0000 UTC m=+760.613845958" observedRunningTime="2025-12-09 15:21:15.437941363 +0000 UTC m=+762.592685351" watchObservedRunningTime="2025-12-09 15:21:15.442891896 +0000 UTC m=+762.597635884" Dec 09 15:21:15 crc kubenswrapper[4716]: I1209 15:21:15.481449 4716 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" podStartSLOduration=31.600873202 podStartE2EDuration="45.481421397s" podCreationTimestamp="2025-12-09 15:20:30 +0000 UTC" firstStartedPulling="2025-12-09 15:20:59.46765158 +0000 UTC m=+746.622395578" lastFinishedPulling="2025-12-09 15:21:13.348199785 +0000 UTC m=+760.502943773" observedRunningTime="2025-12-09 15:21:15.477643477 +0000 UTC m=+762.632387475" watchObservedRunningTime="2025-12-09 15:21:15.481421397 +0000 UTC m=+762.636165385" Dec 09 15:21:16 crc kubenswrapper[4716]: I1209 15:21:16.365960 4716 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-hlfs7 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.25:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:21:16 crc kubenswrapper[4716]: I1209 15:21:16.366540 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" podUID="ba73b247-86e8-4b3c-977f-a4bffb4f44e5" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.25:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:21:17 crc kubenswrapper[4716]: I1209 15:21:17.408986 4716 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-hlfs7 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.25:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:21:17 crc kubenswrapper[4716]: I1209 15:21:17.409906 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" podUID="ba73b247-86e8-4b3c-977f-a4bffb4f44e5" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.25:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:21:21 crc kubenswrapper[4716]: I1209 15:21:21.120912 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-hlfs7" Dec 09 15:21:21 crc kubenswrapper[4716]: I1209 15:21:21.134693 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-jxhr5" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.509022 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-4skkl"] Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.511960 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.514478 4716 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-lwb6s" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.515192 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.515436 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.525988 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-4skkl"] Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.585296 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-fkctm"] Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.586581 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-fkctm" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.589486 4716 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-4w5rr" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.596340 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29tfx\" (UniqueName: \"kubernetes.io/projected/8f4f2faa-6c38-44b8-a6ab-057e6819b498-kube-api-access-29tfx\") pod \"cert-manager-cainjector-7f985d654d-4skkl\" (UID: \"8f4f2faa-6c38-44b8-a6ab-057e6819b498\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.616706 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-fkctm"] Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.623966 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cfbrw"] Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.625386 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.628599 4716 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-b4s7x" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.630318 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cfbrw"] Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.697980 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5q7h\" (UniqueName: \"kubernetes.io/projected/d474af73-4a93-4368-90e4-01f572590cab-kube-api-access-f5q7h\") pod \"cert-manager-5b446d88c5-fkctm\" (UID: \"d474af73-4a93-4368-90e4-01f572590cab\") " pod="cert-manager/cert-manager-5b446d88c5-fkctm" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.698067 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvc89\" (UniqueName: \"kubernetes.io/projected/d1900052-b34f-4e6c-93fe-b58810e88ecf-kube-api-access-jvc89\") pod \"cert-manager-webhook-5655c58dd6-cfbrw\" (UID: \"d1900052-b34f-4e6c-93fe-b58810e88ecf\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.698201 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29tfx\" (UniqueName: \"kubernetes.io/projected/8f4f2faa-6c38-44b8-a6ab-057e6819b498-kube-api-access-29tfx\") pod \"cert-manager-cainjector-7f985d654d-4skkl\" (UID: \"8f4f2faa-6c38-44b8-a6ab-057e6819b498\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.721754 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29tfx\" (UniqueName: \"kubernetes.io/projected/8f4f2faa-6c38-44b8-a6ab-057e6819b498-kube-api-access-29tfx\") pod \"cert-manager-cainjector-7f985d654d-4skkl\" (UID: \"8f4f2faa-6c38-44b8-a6ab-057e6819b498\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.800648 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5q7h\" (UniqueName: \"kubernetes.io/projected/d474af73-4a93-4368-90e4-01f572590cab-kube-api-access-f5q7h\") pod \"cert-manager-5b446d88c5-fkctm\" (UID: \"d474af73-4a93-4368-90e4-01f572590cab\") " pod="cert-manager/cert-manager-5b446d88c5-fkctm" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.800800 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvc89\" (UniqueName: \"kubernetes.io/projected/d1900052-b34f-4e6c-93fe-b58810e88ecf-kube-api-access-jvc89\") pod \"cert-manager-webhook-5655c58dd6-cfbrw\" (UID: \"d1900052-b34f-4e6c-93fe-b58810e88ecf\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.832452 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvc89\" (UniqueName: \"kubernetes.io/projected/d1900052-b34f-4e6c-93fe-b58810e88ecf-kube-api-access-jvc89\") pod \"cert-manager-webhook-5655c58dd6-cfbrw\" (UID: \"d1900052-b34f-4e6c-93fe-b58810e88ecf\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.832492 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-f5q7h\" (UniqueName: \"kubernetes.io/projected/d474af73-4a93-4368-90e4-01f572590cab-kube-api-access-f5q7h\") pod \"cert-manager-5b446d88c5-fkctm\" (UID: \"d474af73-4a93-4368-90e4-01f572590cab\") " pod="cert-manager/cert-manager-5b446d88c5-fkctm" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.843296 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.917132 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-fkctm" Dec 09 15:21:28 crc kubenswrapper[4716]: I1209 15:21:28.959728 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:29 crc kubenswrapper[4716]: I1209 15:21:29.562149 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-cfbrw"] Dec 09 15:21:29 crc kubenswrapper[4716]: W1209 15:21:29.574711 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1900052_b34f_4e6c_93fe_b58810e88ecf.slice/crio-30f1f4a1ebeb4b0c35936a4c47640e48396291d34c90b1aa55de3df13bb67482 WatchSource:0}: Error finding container 30f1f4a1ebeb4b0c35936a4c47640e48396291d34c90b1aa55de3df13bb67482: Status 404 returned error can't find the container with id 30f1f4a1ebeb4b0c35936a4c47640e48396291d34c90b1aa55de3df13bb67482 Dec 09 15:21:29 crc kubenswrapper[4716]: I1209 15:21:29.615714 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-4skkl"] Dec 09 15:21:29 crc kubenswrapper[4716]: I1209 15:21:29.808248 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-fkctm"] Dec 09 15:21:30 crc kubenswrapper[4716]: I1209 15:21:30.481451 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" event={"ID":"8f4f2faa-6c38-44b8-a6ab-057e6819b498","Type":"ContainerStarted","Data":"646a095c14be236af3381c9fa32241be5369b03782286f415be4d05fd022345d"} Dec 09 15:21:30 crc kubenswrapper[4716]: I1209 15:21:30.483422 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-fkctm" event={"ID":"d474af73-4a93-4368-90e4-01f572590cab","Type":"ContainerStarted","Data":"fac45f939bdd3490be68d06dac38231511cd116cdfaa006c55d92cfaa25f225f"} Dec 09 15:21:30 crc kubenswrapper[4716]: I1209 15:21:30.484807 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" event={"ID":"d1900052-b34f-4e6c-93fe-b58810e88ecf","Type":"ContainerStarted","Data":"30f1f4a1ebeb4b0c35936a4c47640e48396291d34c90b1aa55de3df13bb67482"} Dec 09 15:21:35 crc kubenswrapper[4716]: I1209 15:21:35.665771 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-fkctm" event={"ID":"d474af73-4a93-4368-90e4-01f572590cab","Type":"ContainerStarted","Data":"86e83a30da45207b9c17a5283442484f5ee6b5d1e557e11f16b436dd5c68d4aa"} Dec 09 15:21:35 crc kubenswrapper[4716]: I1209 15:21:35.667266 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" event={"ID":"d1900052-b34f-4e6c-93fe-b58810e88ecf","Type":"ContainerStarted","Data":"7968dcc85da27bd34f31d6c34ef74ceee9816f0fa606d5c50b77922171a73faa"} Dec 09 15:21:35 crc 
kubenswrapper[4716]: I1209 15:21:35.667824 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:35 crc kubenswrapper[4716]: I1209 15:21:35.669168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" event={"ID":"8f4f2faa-6c38-44b8-a6ab-057e6819b498","Type":"ContainerStarted","Data":"2c63f76e79fce2ee8f4dd8cacf2e48a5f8f07ef9e682f3f7e355c30c1bd69123"} Dec 09 15:21:35 crc kubenswrapper[4716]: I1209 15:21:35.689555 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-fkctm" podStartSLOduration=3.003468976 podStartE2EDuration="7.68952153s" podCreationTimestamp="2025-12-09 15:21:28 +0000 UTC" firstStartedPulling="2025-12-09 15:21:29.823833133 +0000 UTC m=+776.978577121" lastFinishedPulling="2025-12-09 15:21:34.509885687 +0000 UTC m=+781.664629675" observedRunningTime="2025-12-09 15:21:35.686290847 +0000 UTC m=+782.841034835" watchObservedRunningTime="2025-12-09 15:21:35.68952153 +0000 UTC m=+782.844265508" Dec 09 15:21:35 crc kubenswrapper[4716]: I1209 15:21:35.832867 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-4skkl" podStartSLOduration=3.065780836 podStartE2EDuration="7.832847446s" podCreationTimestamp="2025-12-09 15:21:28 +0000 UTC" firstStartedPulling="2025-12-09 15:21:29.608361138 +0000 UTC m=+776.763105126" lastFinishedPulling="2025-12-09 15:21:34.375427748 +0000 UTC m=+781.530171736" observedRunningTime="2025-12-09 15:21:35.826694778 +0000 UTC m=+782.981438766" watchObservedRunningTime="2025-12-09 15:21:35.832847446 +0000 UTC m=+782.987591434" Dec 09 15:21:35 crc kubenswrapper[4716]: I1209 15:21:35.861794 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" podStartSLOduration=3.065776113 podStartE2EDuration="7.861759635s" podCreationTimestamp="2025-12-09 15:21:28 +0000 UTC" firstStartedPulling="2025-12-09 15:21:29.578157029 +0000 UTC m=+776.732901017" lastFinishedPulling="2025-12-09 15:21:34.374140541 +0000 UTC m=+781.528884539" observedRunningTime="2025-12-09 15:21:35.86023105 +0000 UTC m=+783.014975028" watchObservedRunningTime="2025-12-09 15:21:35.861759635 +0000 UTC m=+783.016503623" Dec 09 15:21:43 crc kubenswrapper[4716]: I1209 15:21:43.964691 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-cfbrw" Dec 09 15:21:47 crc kubenswrapper[4716]: I1209 15:21:47.923166 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:21:47 crc kubenswrapper[4716]: I1209 15:21:47.924173 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.156524 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s"] Dec 09 
15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.158962 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.166745 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.175851 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s"] Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.284074 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxzx6\" (UniqueName: \"kubernetes.io/projected/a0ebcd27-55c5-41b1-9663-2b880809ff5d-kube-api-access-rxzx6\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.284191 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.284228 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.386228 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.386303 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.386399 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxzx6\" (UniqueName: \"kubernetes.io/projected/a0ebcd27-55c5-41b1-9663-2b880809ff5d-kube-api-access-rxzx6\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.387007 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.387090 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.413430 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxzx6\" (UniqueName: \"kubernetes.io/projected/a0ebcd27-55c5-41b1-9663-2b880809ff5d-kube-api-access-rxzx6\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.482493 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.569147 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn"] Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.571296 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.603873 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn"] Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.692138 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srrkh\" (UniqueName: \"kubernetes.io/projected/cc820795-3bec-42ce-b838-37592232bfb1-kube-api-access-srrkh\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.692217 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.692469 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.771107 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s"] Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.794539 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.794598 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.794682 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srrkh\" (UniqueName: \"kubernetes.io/projected/cc820795-3bec-42ce-b838-37592232bfb1-kube-api-access-srrkh\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.795680 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.795757 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.823851 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srrkh\" (UniqueName: \"kubernetes.io/projected/cc820795-3bec-42ce-b838-37592232bfb1-kube-api-access-srrkh\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.911069 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.968921 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" event={"ID":"a0ebcd27-55c5-41b1-9663-2b880809ff5d","Type":"ContainerStarted","Data":"de342e1ae2220b72658b9d210fc94483828abf728656dbcd1887e39490d67643"} Dec 09 15:22:14 crc kubenswrapper[4716]: I1209 15:22:14.969495 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" event={"ID":"a0ebcd27-55c5-41b1-9663-2b880809ff5d","Type":"ContainerStarted","Data":"85c516bde1a71b1a75b3ccb2314f38cfe17c23b24bb286e29d64d4a5d6e34194"} Dec 09 15:22:15 crc kubenswrapper[4716]: I1209 15:22:15.251364 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn"] Dec 09 15:22:15 crc kubenswrapper[4716]: I1209 15:22:15.980887 4716 generic.go:334] "Generic (PLEG): container finished" podID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerID="de342e1ae2220b72658b9d210fc94483828abf728656dbcd1887e39490d67643" exitCode=0 Dec 09 15:22:15 crc kubenswrapper[4716]: I1209 15:22:15.981296 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" event={"ID":"a0ebcd27-55c5-41b1-9663-2b880809ff5d","Type":"ContainerDied","Data":"de342e1ae2220b72658b9d210fc94483828abf728656dbcd1887e39490d67643"} Dec 09 15:22:15 crc kubenswrapper[4716]: I1209 15:22:15.982513 4716 generic.go:334] "Generic (PLEG): container finished" podID="cc820795-3bec-42ce-b838-37592232bfb1" containerID="491dd38f80528b506a7c8453e4795f64ec7643c0c0af7a3be8a1a9031eebc2a4" exitCode=0 Dec 09 15:22:15 crc kubenswrapper[4716]: I1209 15:22:15.982558 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" 
event={"ID":"cc820795-3bec-42ce-b838-37592232bfb1","Type":"ContainerDied","Data":"491dd38f80528b506a7c8453e4795f64ec7643c0c0af7a3be8a1a9031eebc2a4"} Dec 09 15:22:15 crc kubenswrapper[4716]: I1209 15:22:15.982598 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" event={"ID":"cc820795-3bec-42ce-b838-37592232bfb1","Type":"ContainerStarted","Data":"6c7c8bf744b33357061a7fbd8fb01ab4862a809f71fbc1887542f51880c6d809"} Dec 09 15:22:17 crc kubenswrapper[4716]: I1209 15:22:17.903913 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mtt9b"] Dec 09 15:22:17 crc kubenswrapper[4716]: I1209 15:22:17.906149 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:17 crc kubenswrapper[4716]: I1209 15:22:17.922959 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:22:17 crc kubenswrapper[4716]: I1209 15:22:17.923072 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:22:17 crc kubenswrapper[4716]: I1209 15:22:17.930428 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mtt9b"] Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.001322 4716 generic.go:334] "Generic (PLEG): container finished" podID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerID="c21fec0c7aa3cd9c1c6d5e85096bc53aad971fe6bdf4250f744a868a41d1393e" exitCode=0 Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.001411 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" event={"ID":"a0ebcd27-55c5-41b1-9663-2b880809ff5d","Type":"ContainerDied","Data":"c21fec0c7aa3cd9c1c6d5e85096bc53aad971fe6bdf4250f744a868a41d1393e"} Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.004658 4716 generic.go:334] "Generic (PLEG): container finished" podID="cc820795-3bec-42ce-b838-37592232bfb1" containerID="c2026cda3242683e6db5f37d511257dbb49e0d8a98c98ff59930d16265603198" exitCode=0 Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.004707 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" event={"ID":"cc820795-3bec-42ce-b838-37592232bfb1","Type":"ContainerDied","Data":"c2026cda3242683e6db5f37d511257dbb49e0d8a98c98ff59930d16265603198"} Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.070877 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92n9v\" (UniqueName: \"kubernetes.io/projected/2cad99aa-e995-4f50-ba50-e5386ad6311d-kube-api-access-92n9v\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.070980 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-utilities\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.071120 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-catalog-content\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.172817 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-catalog-content\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.172971 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92n9v\" (UniqueName: \"kubernetes.io/projected/2cad99aa-e995-4f50-ba50-e5386ad6311d-kube-api-access-92n9v\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.173059 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-utilities\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.173487 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-catalog-content\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.173580 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-utilities\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.206307 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92n9v\" (UniqueName: \"kubernetes.io/projected/2cad99aa-e995-4f50-ba50-e5386ad6311d-kube-api-access-92n9v\") pod \"redhat-operators-mtt9b\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.232534 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:18 crc kubenswrapper[4716]: I1209 15:22:18.794015 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mtt9b"] Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.017168 4716 generic.go:334] "Generic (PLEG): container finished" podID="cc820795-3bec-42ce-b838-37592232bfb1" containerID="a111318a6c4da771bd21991eb9555f10514e9b35ad3a392d8808aa940aa82cad" exitCode=0 Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.017421 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" event={"ID":"cc820795-3bec-42ce-b838-37592232bfb1","Type":"ContainerDied","Data":"a111318a6c4da771bd21991eb9555f10514e9b35ad3a392d8808aa940aa82cad"} Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.019815 4716 generic.go:334] "Generic (PLEG): container finished" podID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerID="a383aa566d07de511ccda5e349ce9bad00641bf59bc2a088ce9162820a535e5a" exitCode=0 Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.019889 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerDied","Data":"a383aa566d07de511ccda5e349ce9bad00641bf59bc2a088ce9162820a535e5a"} Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.019919 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerStarted","Data":"95d0c65468e99269358cbc7caa8a3aa2d3e0534a42fa7fda567dbd63f236d114"} Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.028961 4716 generic.go:334] "Generic (PLEG): container finished" podID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerID="e4cacffbca32adf9d8a2841bed6849e96c3e4c4da1af6f46c28a6b8c0db44b37" exitCode=0 Dec 09 15:22:19 crc kubenswrapper[4716]: I1209 15:22:19.029031 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" event={"ID":"a0ebcd27-55c5-41b1-9663-2b880809ff5d","Type":"ContainerDied","Data":"e4cacffbca32adf9d8a2841bed6849e96c3e4c4da1af6f46c28a6b8c0db44b37"} Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.394262 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.402070 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.528532 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxzx6\" (UniqueName: \"kubernetes.io/projected/a0ebcd27-55c5-41b1-9663-2b880809ff5d-kube-api-access-rxzx6\") pod \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.528736 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srrkh\" (UniqueName: \"kubernetes.io/projected/cc820795-3bec-42ce-b838-37592232bfb1-kube-api-access-srrkh\") pod \"cc820795-3bec-42ce-b838-37592232bfb1\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.528809 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-util\") pod \"cc820795-3bec-42ce-b838-37592232bfb1\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.528865 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-util\") pod \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.528956 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-bundle\") pod \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\" (UID: \"a0ebcd27-55c5-41b1-9663-2b880809ff5d\") " Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.529005 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-bundle\") pod \"cc820795-3bec-42ce-b838-37592232bfb1\" (UID: \"cc820795-3bec-42ce-b838-37592232bfb1\") " Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.530431 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-bundle" (OuterVolumeSpecName: "bundle") pod "cc820795-3bec-42ce-b838-37592232bfb1" (UID: "cc820795-3bec-42ce-b838-37592232bfb1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.531563 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-bundle" (OuterVolumeSpecName: "bundle") pod "a0ebcd27-55c5-41b1-9663-2b880809ff5d" (UID: "a0ebcd27-55c5-41b1-9663-2b880809ff5d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.543664 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-util" (OuterVolumeSpecName: "util") pod "a0ebcd27-55c5-41b1-9663-2b880809ff5d" (UID: "a0ebcd27-55c5-41b1-9663-2b880809ff5d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.546712 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-util" (OuterVolumeSpecName: "util") pod "cc820795-3bec-42ce-b838-37592232bfb1" (UID: "cc820795-3bec-42ce-b838-37592232bfb1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.558425 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc820795-3bec-42ce-b838-37592232bfb1-kube-api-access-srrkh" (OuterVolumeSpecName: "kube-api-access-srrkh") pod "cc820795-3bec-42ce-b838-37592232bfb1" (UID: "cc820795-3bec-42ce-b838-37592232bfb1"). InnerVolumeSpecName "kube-api-access-srrkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.558898 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0ebcd27-55c5-41b1-9663-2b880809ff5d-kube-api-access-rxzx6" (OuterVolumeSpecName: "kube-api-access-rxzx6") pod "a0ebcd27-55c5-41b1-9663-2b880809ff5d" (UID: "a0ebcd27-55c5-41b1-9663-2b880809ff5d"). InnerVolumeSpecName "kube-api-access-rxzx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.631265 4716 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-util\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.631330 4716 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a0ebcd27-55c5-41b1-9663-2b880809ff5d-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.631343 4716 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.631355 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxzx6\" (UniqueName: \"kubernetes.io/projected/a0ebcd27-55c5-41b1-9663-2b880809ff5d-kube-api-access-rxzx6\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.631372 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srrkh\" (UniqueName: \"kubernetes.io/projected/cc820795-3bec-42ce-b838-37592232bfb1-kube-api-access-srrkh\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:20 crc kubenswrapper[4716]: I1209 15:22:20.631383 4716 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cc820795-3bec-42ce-b838-37592232bfb1-util\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:21 crc kubenswrapper[4716]: I1209 15:22:21.046957 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" event={"ID":"a0ebcd27-55c5-41b1-9663-2b880809ff5d","Type":"ContainerDied","Data":"85c516bde1a71b1a75b3ccb2314f38cfe17c23b24bb286e29d64d4a5d6e34194"} Dec 09 15:22:21 crc kubenswrapper[4716]: I1209 15:22:21.047055 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85c516bde1a71b1a75b3ccb2314f38cfe17c23b24bb286e29d64d4a5d6e34194" Dec 09 15:22:21 crc 
kubenswrapper[4716]: I1209 15:22:21.047828 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s" Dec 09 15:22:21 crc kubenswrapper[4716]: I1209 15:22:21.050211 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" event={"ID":"cc820795-3bec-42ce-b838-37592232bfb1","Type":"ContainerDied","Data":"6c7c8bf744b33357061a7fbd8fb01ab4862a809f71fbc1887542f51880c6d809"} Dec 09 15:22:21 crc kubenswrapper[4716]: I1209 15:22:21.050266 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn" Dec 09 15:22:21 crc kubenswrapper[4716]: I1209 15:22:21.050272 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c7c8bf744b33357061a7fbd8fb01ab4862a809f71fbc1887542f51880c6d809" Dec 09 15:22:21 crc kubenswrapper[4716]: I1209 15:22:21.055210 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerStarted","Data":"a8dfacfecc3be235dcc07d6cf0fd651aeab319a275bc99520e6a74ee8ecb480c"} Dec 09 15:22:25 crc kubenswrapper[4716]: I1209 15:22:25.101981 4716 generic.go:334] "Generic (PLEG): container finished" podID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerID="a8dfacfecc3be235dcc07d6cf0fd651aeab319a275bc99520e6a74ee8ecb480c" exitCode=0 Dec 09 15:22:25 crc kubenswrapper[4716]: I1209 15:22:25.102066 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerDied","Data":"a8dfacfecc3be235dcc07d6cf0fd651aeab319a275bc99520e6a74ee8ecb480c"} Dec 09 15:22:26 crc kubenswrapper[4716]: I1209 15:22:26.112892 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerStarted","Data":"761bcf15803e94288cb8cc07174c1bf1f7edac0122f415c332cd9e59cb3d22e6"} Dec 09 15:22:26 crc kubenswrapper[4716]: I1209 15:22:26.136855 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mtt9b" podStartSLOduration=2.601073476 podStartE2EDuration="9.13683039s" podCreationTimestamp="2025-12-09 15:22:17 +0000 UTC" firstStartedPulling="2025-12-09 15:22:19.021664096 +0000 UTC m=+826.176408084" lastFinishedPulling="2025-12-09 15:22:25.55742101 +0000 UTC m=+832.712164998" observedRunningTime="2025-12-09 15:22:26.132530195 +0000 UTC m=+833.287274183" watchObservedRunningTime="2025-12-09 15:22:26.13683039 +0000 UTC m=+833.291574388" Dec 09 15:22:28 crc kubenswrapper[4716]: I1209 15:22:28.233708 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:28 crc kubenswrapper[4716]: I1209 15:22:28.234467 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:29 crc kubenswrapper[4716]: I1209 15:22:29.294528 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mtt9b" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="registry-server" probeResult="failure" output=< Dec 09 15:22:29 crc 
kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:22:29 crc kubenswrapper[4716]: > Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.703114 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx"] Dec 09 15:22:31 crc kubenswrapper[4716]: E1209 15:22:31.704016 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="pull" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704037 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="pull" Dec 09 15:22:31 crc kubenswrapper[4716]: E1209 15:22:31.704066 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="util" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704074 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="util" Dec 09 15:22:31 crc kubenswrapper[4716]: E1209 15:22:31.704085 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="extract" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704095 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="extract" Dec 09 15:22:31 crc kubenswrapper[4716]: E1209 15:22:31.704107 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="pull" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704114 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="pull" Dec 09 15:22:31 crc kubenswrapper[4716]: E1209 15:22:31.704126 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="extract" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704136 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="extract" Dec 09 15:22:31 crc kubenswrapper[4716]: E1209 15:22:31.704156 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="util" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704164 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="util" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704368 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc820795-3bec-42ce-b838-37592232bfb1" containerName="extract" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.704392 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0ebcd27-55c5-41b1-9663-2b880809ff5d" containerName="extract" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.705567 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.709358 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-fzfpm" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.709553 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.709636 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.710111 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.710639 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.712124 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.727884 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx"] Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.839715 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-webhook-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.839803 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.839869 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-apiservice-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.839930 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/b564ab42-fe8b-4b45-a502-0d10a2cedd07-manager-config\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.839955 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f27t\" (UniqueName: 
\"kubernetes.io/projected/b564ab42-fe8b-4b45-a502-0d10a2cedd07-kube-api-access-2f27t\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.941796 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-webhook-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.941871 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.941917 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-apiservice-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.941950 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/b564ab42-fe8b-4b45-a502-0d10a2cedd07-manager-config\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.943258 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f27t\" (UniqueName: \"kubernetes.io/projected/b564ab42-fe8b-4b45-a502-0d10a2cedd07-kube-api-access-2f27t\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.949477 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-webhook-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.959430 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.970609 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b564ab42-fe8b-4b45-a502-0d10a2cedd07-apiservice-cert\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:31 crc kubenswrapper[4716]: I1209 15:22:31.970917 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/b564ab42-fe8b-4b45-a502-0d10a2cedd07-manager-config\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:32 crc kubenswrapper[4716]: I1209 15:22:32.004798 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f27t\" (UniqueName: \"kubernetes.io/projected/b564ab42-fe8b-4b45-a502-0d10a2cedd07-kube-api-access-2f27t\") pod \"loki-operator-controller-manager-9bd696f86-gbtzx\" (UID: \"b564ab42-fe8b-4b45-a502-0d10a2cedd07\") " pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:32 crc kubenswrapper[4716]: I1209 15:22:32.025775 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" Dec 09 15:22:32 crc kubenswrapper[4716]: I1209 15:22:32.495968 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx"] Dec 09 15:22:33 crc kubenswrapper[4716]: I1209 15:22:33.181507 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" event={"ID":"b564ab42-fe8b-4b45-a502-0d10a2cedd07","Type":"ContainerStarted","Data":"a38c5a7b67c15d696ed2ac6c417cc025f31a4c1130cf8792fc97970b0d3236bc"} Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.451311 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-vvvc4"] Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.452922 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.456528 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.456701 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-sbm6c" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.456715 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.526963 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgx8n\" (UniqueName: \"kubernetes.io/projected/90319519-b1f6-4387-aa09-eacd3c5b33ff-kube-api-access-lgx8n\") pod \"cluster-logging-operator-ff9846bd-vvvc4\" (UID: \"90319519-b1f6-4387-aa09-eacd3c5b33ff\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.557164 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-vvvc4"] Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.629376 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgx8n\" (UniqueName: \"kubernetes.io/projected/90319519-b1f6-4387-aa09-eacd3c5b33ff-kube-api-access-lgx8n\") pod \"cluster-logging-operator-ff9846bd-vvvc4\" (UID: \"90319519-b1f6-4387-aa09-eacd3c5b33ff\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.669326 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgx8n\" (UniqueName: \"kubernetes.io/projected/90319519-b1f6-4387-aa09-eacd3c5b33ff-kube-api-access-lgx8n\") pod \"cluster-logging-operator-ff9846bd-vvvc4\" (UID: \"90319519-b1f6-4387-aa09-eacd3c5b33ff\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" Dec 09 15:22:34 crc kubenswrapper[4716]: I1209 15:22:34.771410 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" Dec 09 15:22:35 crc kubenswrapper[4716]: I1209 15:22:35.285773 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-vvvc4"] Dec 09 15:22:35 crc kubenswrapper[4716]: W1209 15:22:35.326481 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90319519_b1f6_4387_aa09_eacd3c5b33ff.slice/crio-82fd5bd3a9efa7e8caea2b7c36c0cb759a86ea9fc70d945a03e538a7c0e86fd5 WatchSource:0}: Error finding container 82fd5bd3a9efa7e8caea2b7c36c0cb759a86ea9fc70d945a03e538a7c0e86fd5: Status 404 returned error can't find the container with id 82fd5bd3a9efa7e8caea2b7c36c0cb759a86ea9fc70d945a03e538a7c0e86fd5 Dec 09 15:22:36 crc kubenswrapper[4716]: I1209 15:22:36.210829 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" event={"ID":"90319519-b1f6-4387-aa09-eacd3c5b33ff","Type":"ContainerStarted","Data":"82fd5bd3a9efa7e8caea2b7c36c0cb759a86ea9fc70d945a03e538a7c0e86fd5"} Dec 09 15:22:38 crc kubenswrapper[4716]: I1209 15:22:38.342487 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:38 crc kubenswrapper[4716]: I1209 15:22:38.405950 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:41 crc kubenswrapper[4716]: I1209 15:22:41.700675 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mtt9b"] Dec 09 15:22:41 crc kubenswrapper[4716]: I1209 15:22:41.701373 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mtt9b" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="registry-server" containerID="cri-o://761bcf15803e94288cb8cc07174c1bf1f7edac0122f415c332cd9e59cb3d22e6" gracePeriod=2 Dec 09 15:22:42 crc kubenswrapper[4716]: I1209 15:22:42.298860 4716 generic.go:334] "Generic (PLEG): container finished" podID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerID="761bcf15803e94288cb8cc07174c1bf1f7edac0122f415c332cd9e59cb3d22e6" exitCode=0 Dec 09 15:22:42 crc kubenswrapper[4716]: I1209 15:22:42.298905 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerDied","Data":"761bcf15803e94288cb8cc07174c1bf1f7edac0122f415c332cd9e59cb3d22e6"} Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.195486 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.345064 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-utilities\") pod \"2cad99aa-e995-4f50-ba50-e5386ad6311d\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.347796 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-catalog-content\") pod \"2cad99aa-e995-4f50-ba50-e5386ad6311d\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.346313 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-utilities" (OuterVolumeSpecName: "utilities") pod "2cad99aa-e995-4f50-ba50-e5386ad6311d" (UID: "2cad99aa-e995-4f50-ba50-e5386ad6311d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.350038 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92n9v\" (UniqueName: \"kubernetes.io/projected/2cad99aa-e995-4f50-ba50-e5386ad6311d-kube-api-access-92n9v\") pod \"2cad99aa-e995-4f50-ba50-e5386ad6311d\" (UID: \"2cad99aa-e995-4f50-ba50-e5386ad6311d\") " Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.350684 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" event={"ID":"b564ab42-fe8b-4b45-a502-0d10a2cedd07","Type":"ContainerStarted","Data":"d974ab7bdc3122c50be1ef109138fd8588e74cd7e70b17f9929f9c923d55ec86"} Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.351894 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.354105 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" event={"ID":"90319519-b1f6-4387-aa09-eacd3c5b33ff","Type":"ContainerStarted","Data":"31aa20a35b56e816499ac4f54068886fd9d15dcd7f4da8165984f099577a6416"} Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.360453 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mtt9b" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.361195 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtt9b" event={"ID":"2cad99aa-e995-4f50-ba50-e5386ad6311d","Type":"ContainerDied","Data":"95d0c65468e99269358cbc7caa8a3aa2d3e0534a42fa7fda567dbd63f236d114"} Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.361354 4716 scope.go:117] "RemoveContainer" containerID="761bcf15803e94288cb8cc07174c1bf1f7edac0122f415c332cd9e59cb3d22e6" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.366843 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cad99aa-e995-4f50-ba50-e5386ad6311d-kube-api-access-92n9v" (OuterVolumeSpecName: "kube-api-access-92n9v") pod "2cad99aa-e995-4f50-ba50-e5386ad6311d" (UID: "2cad99aa-e995-4f50-ba50-e5386ad6311d"). InnerVolumeSpecName "kube-api-access-92n9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.383760 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-vvvc4" podStartSLOduration=1.716482209 podStartE2EDuration="13.38372519s" podCreationTimestamp="2025-12-09 15:22:34 +0000 UTC" firstStartedPulling="2025-12-09 15:22:35.334215666 +0000 UTC m=+842.488959654" lastFinishedPulling="2025-12-09 15:22:47.001458627 +0000 UTC m=+854.156202635" observedRunningTime="2025-12-09 15:22:47.383173584 +0000 UTC m=+854.537917562" watchObservedRunningTime="2025-12-09 15:22:47.38372519 +0000 UTC m=+854.538469178" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.391119 4716 scope.go:117] "RemoveContainer" containerID="a8dfacfecc3be235dcc07d6cf0fd651aeab319a275bc99520e6a74ee8ecb480c" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.461448 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92n9v\" (UniqueName: \"kubernetes.io/projected/2cad99aa-e995-4f50-ba50-e5386ad6311d-kube-api-access-92n9v\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.479002 4716 scope.go:117] "RemoveContainer" containerID="a383aa566d07de511ccda5e349ce9bad00641bf59bc2a088ce9162820a535e5a" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.565074 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2cad99aa-e995-4f50-ba50-e5386ad6311d" (UID: "2cad99aa-e995-4f50-ba50-e5386ad6311d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.665327 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cad99aa-e995-4f50-ba50-e5386ad6311d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.695774 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mtt9b"] Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.702328 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mtt9b"] Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.922524 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.922640 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.922728 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.923676 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"926bc2d973bc4613f2c81809b0dc284f4ef49b03a208dfa2c446ce01a7ee38bc"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:22:47 crc kubenswrapper[4716]: I1209 15:22:47.923743 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://926bc2d973bc4613f2c81809b0dc284f4ef49b03a208dfa2c446ce01a7ee38bc" gracePeriod=600 Dec 09 15:22:48 crc kubenswrapper[4716]: I1209 15:22:48.381026 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="926bc2d973bc4613f2c81809b0dc284f4ef49b03a208dfa2c446ce01a7ee38bc" exitCode=0 Dec 09 15:22:48 crc kubenswrapper[4716]: I1209 15:22:48.381225 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"926bc2d973bc4613f2c81809b0dc284f4ef49b03a208dfa2c446ce01a7ee38bc"} Dec 09 15:22:48 crc kubenswrapper[4716]: I1209 15:22:48.381645 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"34321690e545d4c8bbe42a5bb706305ba7bf7764ef04b070ddeba60a6895e655"} Dec 09 15:22:48 crc kubenswrapper[4716]: I1209 15:22:48.381675 4716 scope.go:117] "RemoveContainer" containerID="7ac024156aad5546738363991b9b7ef9a0dffb92b890f8d72d6755b45adfb383" 
Dec 09 15:22:49 crc kubenswrapper[4716]: I1209 15:22:49.228878 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" path="/var/lib/kubelet/pods/2cad99aa-e995-4f50-ba50-e5386ad6311d/volumes"
Dec 09 15:22:54 crc kubenswrapper[4716]: I1209 15:22:54.501739 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" event={"ID":"b564ab42-fe8b-4b45-a502-0d10a2cedd07","Type":"ContainerStarted","Data":"fde1b00c642f244b7da192999c951f6604ed11c6c1db65e99fff8c6684d0d684"}
Dec 09 15:22:54 crc kubenswrapper[4716]: I1209 15:22:54.502512 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx"
Dec 09 15:22:54 crc kubenswrapper[4716]: I1209 15:22:54.540275 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx"
Dec 09 15:22:54 crc kubenswrapper[4716]: I1209 15:22:54.548051 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-9bd696f86-gbtzx" podStartSLOduration=2.270715676 podStartE2EDuration="23.548025508s" podCreationTimestamp="2025-12-09 15:22:31 +0000 UTC" firstStartedPulling="2025-12-09 15:22:32.51108579 +0000 UTC m=+839.665829768" lastFinishedPulling="2025-12-09 15:22:53.788395612 +0000 UTC m=+860.943139600" observedRunningTime="2025-12-09 15:22:54.541003354 +0000 UTC m=+861.695747342" watchObservedRunningTime="2025-12-09 15:22:54.548025508 +0000 UTC m=+861.702769486"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.647135 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"]
Dec 09 15:22:59 crc kubenswrapper[4716]: E1209 15:22:59.648264 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="registry-server"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.648283 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="registry-server"
Dec 09 15:22:59 crc kubenswrapper[4716]: E1209 15:22:59.648299 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="extract-content"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.648307 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="extract-content"
Dec 09 15:22:59 crc kubenswrapper[4716]: E1209 15:22:59.648338 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="extract-utilities"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.648350 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="extract-utilities"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.648509 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cad99aa-e995-4f50-ba50-e5386ad6311d" containerName="registry-server"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.649187 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.655590 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.655729 4716 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-qg7qj"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.655908 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.662279 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.701581 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s566b\" (UniqueName: \"kubernetes.io/projected/03852f82-c325-4f8e-a829-c9fe0716ab71-kube-api-access-s566b\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") " pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.701853 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-34d13e63-4f59-497b-b0fe-2601478b6182\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34d13e63-4f59-497b-b0fe-2601478b6182\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") " pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.803856 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-34d13e63-4f59-497b-b0fe-2601478b6182\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34d13e63-4f59-497b-b0fe-2601478b6182\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") " pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.803989 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s566b\" (UniqueName: \"kubernetes.io/projected/03852f82-c325-4f8e-a829-c9fe0716ab71-kube-api-access-s566b\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") " pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.807917 4716 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.807970 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-34d13e63-4f59-497b-b0fe-2601478b6182\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34d13e63-4f59-497b-b0fe-2601478b6182\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f976bdefb73ec9dd5fbae60286ef59746e79674a696978031ba3b241c24a6612/globalmount\"" pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.825385 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s566b\" (UniqueName: \"kubernetes.io/projected/03852f82-c325-4f8e-a829-c9fe0716ab71-kube-api-access-s566b\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") " pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.837063 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-34d13e63-4f59-497b-b0fe-2601478b6182\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34d13e63-4f59-497b-b0fe-2601478b6182\") pod \"minio\" (UID: \"03852f82-c325-4f8e-a829-c9fe0716ab71\") " pod="minio-dev/minio"
Dec 09 15:22:59 crc kubenswrapper[4716]: I1209 15:22:59.974898 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 09 15:23:00 crc kubenswrapper[4716]: I1209 15:23:00.223529 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 09 15:23:00 crc kubenswrapper[4716]: I1209 15:23:00.549142 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"03852f82-c325-4f8e-a829-c9fe0716ab71","Type":"ContainerStarted","Data":"d2ea8716630899247e79c7b03514ccb332898bc8e20376008d2af616c84619d5"}
Dec 09 15:23:03 crc kubenswrapper[4716]: I1209 15:23:03.573111 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"03852f82-c325-4f8e-a829-c9fe0716ab71","Type":"ContainerStarted","Data":"081640e6f7c4cf29f84056731dc04b50d7286e351823a9864ed385f845e52f51"}
Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.580464 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=7.483897636 podStartE2EDuration="10.580430749s" podCreationTimestamp="2025-12-09 15:22:57 +0000 UTC" firstStartedPulling="2025-12-09 15:23:00.246148643 +0000 UTC m=+867.400892631" lastFinishedPulling="2025-12-09 15:23:03.342681756 +0000 UTC m=+870.497425744" observedRunningTime="2025-12-09 15:23:03.601410218 +0000 UTC m=+870.756154206" watchObservedRunningTime="2025-12-09 15:23:07.580430749 +0000 UTC m=+874.735174737"
Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.583294 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-shzjb"]
Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.584581 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.586373 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-692ws" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.587551 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.587650 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.588110 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.589017 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.611829 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-shzjb"] Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.671691 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vqcs\" (UniqueName: \"kubernetes.io/projected/96be7883-9eb8-4f5c-9376-e4237b3663f8-kube-api-access-6vqcs\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.671758 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.671965 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.672044 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.672188 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96be7883-9eb8-4f5c-9376-e4237b3663f8-config\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.742604 4716 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-bqnhh"] Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.743742 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.746574 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.747858 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.753496 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.773895 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.773968 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.774038 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/96be7883-9eb8-4f5c-9376-e4237b3663f8-config\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.774171 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vqcs\" (UniqueName: \"kubernetes.io/projected/96be7883-9eb8-4f5c-9376-e4237b3663f8-kube-api-access-6vqcs\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.774199 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.781911 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.783603 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/96be7883-9eb8-4f5c-9376-e4237b3663f8-config\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.783806 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.787113 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-bqnhh"] Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.789389 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/96be7883-9eb8-4f5c-9376-e4237b3663f8-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.833217 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vqcs\" (UniqueName: \"kubernetes.io/projected/96be7883-9eb8-4f5c-9376-e4237b3663f8-kube-api-access-6vqcs\") pod \"logging-loki-distributor-76cc67bf56-shzjb\" (UID: \"96be7883-9eb8-4f5c-9376-e4237b3663f8\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.876065 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.876152 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.876184 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4013658d-da41-4b67-84da-b0ca67216d45-config\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.876227 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: 
I1209 15:23:07.876253 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t88f\" (UniqueName: \"kubernetes.io/projected/4013658d-da41-4b67-84da-b0ca67216d45-kube-api-access-6t88f\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.876304 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.891697 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl"] Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.892854 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.903821 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl"] Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.915147 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.920451 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.921177 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.979138 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.979613 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.979852 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.979992 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.980126 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c35fb07-6fe6-490e-b627-165a5500e574-config\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.980314 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.980440 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4013658d-da41-4b67-84da-b0ca67216d45-config\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.980587 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrv52\" (UniqueName: \"kubernetes.io/projected/1c35fb07-6fe6-490e-b627-165a5500e574-kube-api-access-rrv52\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.980781 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.981157 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.981311 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t88f\" (UniqueName: \"kubernetes.io/projected/4013658d-da41-4b67-84da-b0ca67216d45-kube-api-access-6t88f\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.981725 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:07 crc kubenswrapper[4716]: I1209 15:23:07.993556 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4013658d-da41-4b67-84da-b0ca67216d45-config\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.002643 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.003634 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.004484 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/4013658d-da41-4b67-84da-b0ca67216d45-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.012107 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t88f\" (UniqueName: \"kubernetes.io/projected/4013658d-da41-4b67-84da-b0ca67216d45-kube-api-access-6t88f\") pod \"logging-loki-querier-5895d59bb8-bqnhh\" (UID: \"4013658d-da41-4b67-84da-b0ca67216d45\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.085192 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.118813 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c35fb07-6fe6-490e-b627-165a5500e574-config\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.119064 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrv52\" (UniqueName: \"kubernetes.io/projected/1c35fb07-6fe6-490e-b627-165a5500e574-kube-api-access-rrv52\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.119156 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.119387 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.119444 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.123767 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.124126 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c35fb07-6fe6-490e-b627-165a5500e574-config\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.124296 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-58996586f7-4xczf"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.124453 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.129037 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.129233 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/1c35fb07-6fe6-490e-b627-165a5500e574-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.133051 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.143851 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.144226 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.144424 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.144561 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.184679 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrv52\" (UniqueName: \"kubernetes.io/projected/1c35fb07-6fe6-490e-b627-165a5500e574-kube-api-access-rrv52\") pod \"logging-loki-query-frontend-84558f7c9f-zcnfl\" (UID: \"1c35fb07-6fe6-490e-b627-165a5500e574\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.227829 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-58996586f7-7vqxj"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.230888 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.237872 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-tpsqc" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.238040 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-58996586f7-4xczf"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.246478 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.250109 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-58996586f7-7vqxj"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.331805 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-tls-secret\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.331875 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.331921 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-rbac\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.331946 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-lokistack-gateway\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.331964 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-tls-secret\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.331980 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7v98\" (UniqueName: \"kubernetes.io/projected/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-kube-api-access-s7v98\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332008 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332034 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: 
\"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-lokistack-gateway\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332056 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-tenants\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332086 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332102 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-ca-bundle\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332124 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-rbac\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332141 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-tenants\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332175 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcq2q\" (UniqueName: \"kubernetes.io/projected/3047bff0-e43d-44a3-9a8e-7491cbc979af-kube-api-access-vcq2q\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332194 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-ca-bundle\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.332212 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.434449 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-tls-secret\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435057 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435104 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-rbac\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435129 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-lokistack-gateway\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435147 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-tls-secret\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435165 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7v98\" (UniqueName: \"kubernetes.io/projected/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-kube-api-access-s7v98\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435198 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435230 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-lokistack-gateway\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: 
\"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435251 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-tenants\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435288 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-ca-bundle\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435319 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435348 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-rbac\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435379 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-tenants\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435409 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcq2q\" (UniqueName: \"kubernetes.io/projected/3047bff0-e43d-44a3-9a8e-7491cbc979af-kube-api-access-vcq2q\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435436 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-ca-bundle\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.435456 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.436843 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-ca-bundle\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.438315 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.438551 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-rbac\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.439124 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-lokistack-gateway\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.440838 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.440918 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-lokistack-gateway\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.446289 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-ca-bundle\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.446806 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/3047bff0-e43d-44a3-9a8e-7491cbc979af-rbac\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.452152 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-tenants\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: 
\"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.455115 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-tls-secret\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.469656 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-tls-secret\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.471827 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/3047bff0-e43d-44a3-9a8e-7491cbc979af-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.472969 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7v98\" (UniqueName: \"kubernetes.io/projected/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-kube-api-access-s7v98\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.476186 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-tenants\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.480835 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-58996586f7-7vqxj\" (UID: \"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef\") " pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.481073 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcq2q\" (UniqueName: \"kubernetes.io/projected/3047bff0-e43d-44a3-9a8e-7491cbc979af-kube-api-access-vcq2q\") pod \"logging-loki-gateway-58996586f7-4xczf\" (UID: \"3047bff0-e43d-44a3-9a8e-7491cbc979af\") " pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.611933 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.689950 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-shzjb"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.771720 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.773315 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.779087 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.780293 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.783256 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.789158 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.789276 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.862634 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.868096 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.875670 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.875828 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.876356 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.954117 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-bqnhh"] Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.962380 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.962418 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.962479 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.962503 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpd77\" (UniqueName: \"kubernetes.io/projected/00f16fe7-88b3-4d0a-ba38-f68a4d340686-kube-api-access-kpd77\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.962522 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.962573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.963081 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/00f16fe7-88b3-4d0a-ba38-f68a4d340686-config\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:08 crc kubenswrapper[4716]: I1209 15:23:08.963103 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.008680 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-58996586f7-7vqxj"] Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.020184 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.021605 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.025833 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.026227 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.028340 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.065693 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.065844 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.065874 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/610ad8de-905a-4420-adc6-a0b438e2b346-config\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.065918 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn84n\" (UniqueName: \"kubernetes.io/projected/610ad8de-905a-4420-adc6-a0b438e2b346-kube-api-access-hn84n\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.065959 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.066014 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.066040 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00f16fe7-88b3-4d0a-ba38-f68a4d340686-config\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.066089 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.066125 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.067499 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.067598 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.067645 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpd77\" (UniqueName: \"kubernetes.io/projected/00f16fe7-88b3-4d0a-ba38-f68a4d340686-kube-api-access-kpd77\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.067670 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.067699 
4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.067758 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-87d705e0-af27-4716-b69b-1c0e7b627147\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-87d705e0-af27-4716-b69b-1c0e7b627147\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.070183 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00f16fe7-88b3-4d0a-ba38-f68a4d340686-config\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.072129 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.072613 4716 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.073121 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ce4d08c22457cdccd6fbaa5a5f2d37192f4bf36c22eb152bd94ab6454e1035ec/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.072886 4716 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.073221 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f252b37724affc18aefa6cbc860d11a76e90427e7ea6c7e8fcc822965acc2785/globalmount\"" pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.075595 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.076120 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.078774 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/00f16fe7-88b3-4d0a-ba38-f68a4d340686-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.089481 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpd77\" (UniqueName: \"kubernetes.io/projected/00f16fe7-88b3-4d0a-ba38-f68a4d340686-kube-api-access-kpd77\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.104384 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1280e5f8-9ce2-4e5a-9fb8-148ee1ecef97\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.110790 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adc16b44-90b2-4f96-9c5e-50adc2966741\") pod \"logging-loki-ingester-0\" (UID: \"00f16fe7-88b3-4d0a-ba38-f68a4d340686\") " pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.169321 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.169651 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/841d38f9-6814-4dc5-bb78-c154f42b3257-config\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.169747 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.169831 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.169972 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170050 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170131 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170220 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-87d705e0-af27-4716-b69b-1c0e7b627147\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-87d705e0-af27-4716-b69b-1c0e7b627147\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170324 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/610ad8de-905a-4420-adc6-a0b438e2b346-config\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170403 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hn84n\" (UniqueName: \"kubernetes.io/projected/610ad8de-905a-4420-adc6-a0b438e2b346-kube-api-access-hn84n\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170499 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-35584002-1f0d-4e05-9e7a-554085f01b14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35584002-1f0d-4e05-9e7a-554085f01b14\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170606 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170650 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.170887 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5ssn\" (UniqueName: \"kubernetes.io/projected/841d38f9-6814-4dc5-bb78-c154f42b3257-kube-api-access-v5ssn\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.171542 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.172106 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/610ad8de-905a-4420-adc6-a0b438e2b346-config\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.175436 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.175807 4716 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.175845 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-87d705e0-af27-4716-b69b-1c0e7b627147\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-87d705e0-af27-4716-b69b-1c0e7b627147\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/96285f8d13f4b8bdec690fe9dc8b42f67051ffed0efd278808c17a6501f1ec7f/globalmount\"" pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.175856 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.177419 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/610ad8de-905a-4420-adc6-a0b438e2b346-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.188616 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn84n\" (UniqueName: \"kubernetes.io/projected/610ad8de-905a-4420-adc6-a0b438e2b346-kube-api-access-hn84n\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.206393 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-87d705e0-af27-4716-b69b-1c0e7b627147\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-87d705e0-af27-4716-b69b-1c0e7b627147\") pod \"logging-loki-compactor-0\" (UID: \"610ad8de-905a-4420-adc6-a0b438e2b346\") " pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.217009 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.234606 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274099 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/841d38f9-6814-4dc5-bb78-c154f42b3257-config\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274195 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274256 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274290 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274351 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-35584002-1f0d-4e05-9e7a-554085f01b14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35584002-1f0d-4e05-9e7a-554085f01b14\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274381 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.274462 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5ssn\" (UniqueName: \"kubernetes.io/projected/841d38f9-6814-4dc5-bb78-c154f42b3257-kube-api-access-v5ssn\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.276869 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.277416 4716 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.277469 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-35584002-1f0d-4e05-9e7a-554085f01b14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35584002-1f0d-4e05-9e7a-554085f01b14\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/04f771be1777325cff57b22ba7556c313db9c336d02ca6786432b988d1228e43/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.277762 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/841d38f9-6814-4dc5-bb78-c154f42b3257-config\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.278933 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.279160 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.279273 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/841d38f9-6814-4dc5-bb78-c154f42b3257-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.304472 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-35584002-1f0d-4e05-9e7a-554085f01b14\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-35584002-1f0d-4e05-9e7a-554085f01b14\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.325245 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5ssn\" (UniqueName: \"kubernetes.io/projected/841d38f9-6814-4dc5-bb78-c154f42b3257-kube-api-access-v5ssn\") pod \"logging-loki-index-gateway-0\" (UID: \"841d38f9-6814-4dc5-bb78-c154f42b3257\") " pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.371852 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.376485 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-58996586f7-4xczf"]
Dec 09 15:23:09 crc kubenswrapper[4716]: W1209 15:23:09.382600 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3047bff0_e43d_44a3_9a8e_7491cbc979af.slice/crio-05f3dd6544fb827ee983c5a74dac9f2c06c67112ebab2ca40afd5894561304f3 WatchSource:0}: Error finding container 05f3dd6544fb827ee983c5a74dac9f2c06c67112ebab2ca40afd5894561304f3: Status 404 returned error can't find the container with id 05f3dd6544fb827ee983c5a74dac9f2c06c67112ebab2ca40afd5894561304f3
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.571720 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.625539 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"00f16fe7-88b3-4d0a-ba38-f68a4d340686","Type":"ContainerStarted","Data":"d1962a5a4de45eb172ed4d4dd8338de7136a4db7d5b1db58709e86b25b877a24"}
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.627158 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" event={"ID":"96be7883-9eb8-4f5c-9376-e4237b3663f8","Type":"ContainerStarted","Data":"36b904d489643fe8622868b2800c4171990b03f300b26d58eeadf14d3e3d1546"}
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.629487 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" event={"ID":"1c35fb07-6fe6-490e-b627-165a5500e574","Type":"ContainerStarted","Data":"0897e66af6b9bbf854dd6305d9d229a4220f5ee7c5697afe2fff2bd82d1a8fe1"}
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.630811 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" event={"ID":"3047bff0-e43d-44a3-9a8e-7491cbc979af","Type":"ContainerStarted","Data":"05f3dd6544fb827ee983c5a74dac9f2c06c67112ebab2ca40afd5894561304f3"}
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.631905 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" event={"ID":"4013658d-da41-4b67-84da-b0ca67216d45","Type":"ContainerStarted","Data":"34cff53df2a1caf2aa10023c4572ec4a9f467387703820dd823154b4d9173567"}
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.633119 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" event={"ID":"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef","Type":"ContainerStarted","Data":"16001e209fbc0785beab31f4a3844d7778b57edd947ed067953937b81a4b237e"}
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.666717 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Dec 09 15:23:09 crc kubenswrapper[4716]: W1209 15:23:09.673528 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod841d38f9_6814_4dc5_bb78_c154f42b3257.slice/crio-c594eaea70c54be53445a4136b71a5cce5111889e2922ff8f843f138302882cb WatchSource:0}: Error finding container c594eaea70c54be53445a4136b71a5cce5111889e2922ff8f843f138302882cb: Status 404 returned error can't find the container with id c594eaea70c54be53445a4136b71a5cce5111889e2922ff8f843f138302882cb
Dec 09 15:23:09 crc kubenswrapper[4716]: I1209 15:23:09.702857 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Dec 09 15:23:10 crc kubenswrapper[4716]: I1209 15:23:10.652950 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"610ad8de-905a-4420-adc6-a0b438e2b346","Type":"ContainerStarted","Data":"0b005e484bdc4551f63c9f89c3be4e4b6505a4081617451611cb6c7df749a857"}
Dec 09 15:23:10 crc kubenswrapper[4716]: I1209 15:23:10.668250 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"841d38f9-6814-4dc5-bb78-c154f42b3257","Type":"ContainerStarted","Data":"c594eaea70c54be53445a4136b71a5cce5111889e2922ff8f843f138302882cb"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.699366 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" event={"ID":"1c35fb07-6fe6-490e-b627-165a5500e574","Type":"ContainerStarted","Data":"30fef4659639772b3ccb524ff07a6d7218af36e645d5328eb9267271268f82f0"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.701711 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.701841 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.701944 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" event={"ID":"4013658d-da41-4b67-84da-b0ca67216d45","Type":"ContainerStarted","Data":"74b4993eee49ec4e990d8112b3d6d920631799833d12713aba1ab2021ccd38f9"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.703722 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"841d38f9-6814-4dc5-bb78-c154f42b3257","Type":"ContainerStarted","Data":"86f5aaa72545f7472e4e7576eda21201d3d628e11a4a5a95454b91cb25f20517"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.703845 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.706170 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" event={"ID":"96be7883-9eb8-4f5c-9376-e4237b3663f8","Type":"ContainerStarted","Data":"3bc0e1dc4f268d017ce884680bc099525b806206939b7644f61081e75c67e50c"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.706376 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.708535 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" event={"ID":"3047bff0-e43d-44a3-9a8e-7491cbc979af","Type":"ContainerStarted","Data":"d379201360473ce8e8c82be5778c149e3850e67f578e1f54e5a7395c16a20114"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.710207 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"00f16fe7-88b3-4d0a-ba38-f68a4d340686","Type":"ContainerStarted","Data":"80ffdb0adb7b4074f06e5f7b88423e77016b595e7c10fbdc061bd0c63ec0a615"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.710454 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.711770 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" event={"ID":"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef","Type":"ContainerStarted","Data":"42146606ab69335b24d7a3efef31f840aa20bb583eb9041ff9a8a63a5fc7da05"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.713019 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"610ad8de-905a-4420-adc6-a0b438e2b346","Type":"ContainerStarted","Data":"d1b74ed5522d11b4442cc30c53325591868f6d4a5cc3d2a6ea87db3323b50b74"}
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.713554 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.725842 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" podStartSLOduration=2.721257872 podStartE2EDuration="6.725820594s" podCreationTimestamp="2025-12-09 15:23:07 +0000 UTC" firstStartedPulling="2025-12-09 15:23:08.82480292 +0000 UTC m=+875.979546908" lastFinishedPulling="2025-12-09 15:23:12.829365632 +0000 UTC m=+879.984109630" observedRunningTime="2025-12-09 15:23:13.719425189 +0000 UTC m=+880.874169177" watchObservedRunningTime="2025-12-09 15:23:13.725820594 +0000 UTC m=+880.880564582"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.754543 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" podStartSLOduration=2.612940162 podStartE2EDuration="6.754464005s" podCreationTimestamp="2025-12-09 15:23:07 +0000 UTC" firstStartedPulling="2025-12-09 15:23:08.706085318 +0000 UTC m=+875.860829306" lastFinishedPulling="2025-12-09 15:23:12.847609161 +0000 UTC m=+880.002353149" observedRunningTime="2025-12-09 15:23:13.743846237 +0000 UTC m=+880.898590245" watchObservedRunningTime="2025-12-09 15:23:13.754464005 +0000 UTC m=+880.909207993"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.785978 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.633541293 podStartE2EDuration="6.785952047s" podCreationTimestamp="2025-12-09 15:23:07 +0000 UTC" firstStartedPulling="2025-12-09 15:23:09.700962764 +0000 UTC m=+876.855706752" lastFinishedPulling="2025-12-09 15:23:12.853373518 +0000 UTC m=+880.008117506" observedRunningTime="2025-12-09 15:23:13.771737606 +0000 UTC m=+880.926481594" watchObservedRunningTime="2025-12-09 15:23:13.785952047 +0000 UTC m=+880.940696035"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.801376 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh" podStartSLOduration=2.874468925 podStartE2EDuration="6.801353304s" podCreationTimestamp="2025-12-09 15:23:07 +0000 UTC" firstStartedPulling="2025-12-09 15:23:08.963882523 +0000 UTC m=+876.118626511" lastFinishedPulling="2025-12-09 15:23:12.890766902 +0000 UTC m=+880.045510890" observedRunningTime="2025-12-09 15:23:13.799339115 +0000 UTC m=+880.954083103" watchObservedRunningTime="2025-12-09 15:23:13.801353304 +0000 UTC m=+880.956097282"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.824635 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.585228862 podStartE2EDuration="6.824582557s" podCreationTimestamp="2025-12-09 15:23:07 +0000 UTC" firstStartedPulling="2025-12-09 15:23:09.5915019 +0000 UTC m=+876.746245888" lastFinishedPulling="2025-12-09 15:23:12.830855585 +0000 UTC m=+879.985599583" observedRunningTime="2025-12-09 15:23:13.823272099 +0000 UTC m=+880.978016087" watchObservedRunningTime="2025-12-09 15:23:13.824582557 +0000 UTC m=+880.979326545"
Dec 09 15:23:13 crc kubenswrapper[4716]: I1209 15:23:13.847512 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.722782561 podStartE2EDuration="6.847489121s" podCreationTimestamp="2025-12-09 15:23:07 +0000 UTC" firstStartedPulling="2025-12-09 15:23:09.676234667 +0000 UTC m=+876.830978655" lastFinishedPulling="2025-12-09 15:23:12.800941227 +0000 UTC m=+879.955685215" observedRunningTime="2025-12-09 15:23:13.843367542 +0000 UTC m=+880.998111540" watchObservedRunningTime="2025-12-09 15:23:13.847489121 +0000 UTC m=+881.002233109"
Dec 09 15:23:15 crc kubenswrapper[4716]: I1209 15:23:15.732355 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" event={"ID":"3047bff0-e43d-44a3-9a8e-7491cbc979af","Type":"ContainerStarted","Data":"812f12ffb3b641cdfb08f6b5a6813616d302af7728b91d37dda9f3e5af33d0cd"}
Dec 09 15:23:15 crc kubenswrapper[4716]: I1209 15:23:15.733257 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf"
Dec 09 15:23:15 crc kubenswrapper[4716]: I1209 15:23:15.736252 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" event={"ID":"f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef","Type":"ContainerStarted","Data":"ed9b796bee467475c1ca341af81b0ed3d8ebc806603a5f1d346a3bbbc44adbbe"}
Dec 09 15:23:15 crc kubenswrapper[4716]: I1209 15:23:15.743047 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf"
Dec 09 15:23:15 crc kubenswrapper[4716]: I1209 15:23:15.761030 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf" podStartSLOduration=1.6483256590000002 podStartE2EDuration="7.760996924s" podCreationTimestamp="2025-12-09 15:23:08 +0000 UTC" firstStartedPulling="2025-12-09 15:23:09.387168796 +0000 UTC m=+876.541912784" lastFinishedPulling="2025-12-09 15:23:15.499840061 +0000 UTC m=+882.654584049" observedRunningTime="2025-12-09 15:23:15.752302282 +0000 UTC m=+882.907046260" watchObservedRunningTime="2025-12-09 15:23:15.760996924 +0000 UTC m=+882.915740912"
Dec 09 15:23:15 crc kubenswrapper[4716]: I1209 15:23:15.802827 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" podStartSLOduration=1.331919224 podStartE2EDuration="7.802798476s" podCreationTimestamp="2025-12-09 15:23:08 +0000 UTC" firstStartedPulling="2025-12-09 15:23:09.030198335 +0000 UTC m=+876.184942323" lastFinishedPulling="2025-12-09 15:23:15.501077587 +0000 UTC m=+882.655821575" observedRunningTime="2025-12-09 15:23:15.79742867 +0000 UTC m=+882.952172658" watchObservedRunningTime="2025-12-09 15:23:15.802798476 +0000 UTC m=+882.957542464"
Dec 09 15:23:16 crc kubenswrapper[4716]: I1209 15:23:16.743972 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj"
Dec 09 15:23:16 crc kubenswrapper[4716]: I1209 15:23:16.744226 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj"
Dec 09 15:23:16 crc kubenswrapper[4716]: I1209 15:23:16.744257 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf"
Dec 09 15:23:16 crc kubenswrapper[4716]: I1209 15:23:16.755586 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj"
Dec 09 15:23:16 crc kubenswrapper[4716]: I1209 15:23:16.756638 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-58996586f7-4xczf"
Dec 09 15:23:16 crc kubenswrapper[4716]: I1209 15:23:16.761138 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.010133 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7bqww"]
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.016787 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.074571 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7bqww"]
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.159605 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-catalog-content\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.160232 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqf6p\" (UniqueName: \"kubernetes.io/projected/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-kube-api-access-cqf6p\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.160432 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-utilities\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.262747 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqf6p\" (UniqueName: \"kubernetes.io/projected/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-kube-api-access-cqf6p\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.262836 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-utilities\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.262906 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-catalog-content\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.263569 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-utilities\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.263697 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-catalog-content\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.287342 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqf6p\" (UniqueName: \"kubernetes.io/projected/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-kube-api-access-cqf6p\") pod \"community-operators-7bqww\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.341597 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:26 crc kubenswrapper[4716]: I1209 15:23:26.876901 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7bqww"]
Dec 09 15:23:27 crc kubenswrapper[4716]: I1209 15:23:27.848809 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerStarted","Data":"45097dde79c451ed6cdbc8a88013c97ac988864e613b9dd31632e18b3bb43b10"}
Dec 09 15:23:28 crc kubenswrapper[4716]: I1209 15:23:28.093597 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-bqnhh"
Dec 09 15:23:28 crc kubenswrapper[4716]: I1209 15:23:28.268115 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl"
Dec 09 15:23:28 crc kubenswrapper[4716]: I1209 15:23:28.859717 4716 generic.go:334] "Generic (PLEG): container finished" podID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerID="a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3" exitCode=0
Dec 09 15:23:28 crc kubenswrapper[4716]: I1209 15:23:28.859787 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerDied","Data":"a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3"}
Dec 09 15:23:29 crc kubenswrapper[4716]: I1209 15:23:29.225076 4716 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Dec 09 15:23:29 crc kubenswrapper[4716]: I1209 15:23:29.225156 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="00f16fe7-88b3-4d0a-ba38-f68a4d340686" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Dec 09 15:23:29 crc kubenswrapper[4716]: I1209 15:23:29.243177 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0"
Dec 09 15:23:29 crc kubenswrapper[4716]: I1209 15:23:29.378188 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0"
Dec 09 15:23:29 crc kubenswrapper[4716]: I1209 15:23:29.870670 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerStarted","Data":"87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e"}
Dec 09 15:23:30 crc kubenswrapper[4716]: I1209 15:23:30.881284 4716 generic.go:334] "Generic (PLEG): container finished" podID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerID="87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e" exitCode=0
Dec 09 15:23:30 crc kubenswrapper[4716]: I1209 15:23:30.881355 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerDied","Data":"87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e"}
Dec 09 15:23:31 crc kubenswrapper[4716]: I1209 15:23:31.892362 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerStarted","Data":"34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5"}
Dec 09 15:23:31 crc kubenswrapper[4716]: I1209 15:23:31.914451 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7bqww" podStartSLOduration=4.464832153 podStartE2EDuration="6.914419799s" podCreationTimestamp="2025-12-09 15:23:25 +0000 UTC" firstStartedPulling="2025-12-09 15:23:28.862314884 +0000 UTC m=+896.017058872" lastFinishedPulling="2025-12-09 15:23:31.31190253 +0000 UTC m=+898.466646518" observedRunningTime="2025-12-09 15:23:31.911862785 +0000 UTC m=+899.066606773" watchObservedRunningTime="2025-12-09 15:23:31.914419799 +0000 UTC m=+899.069163797"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.188351 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-767b7"]
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.190543 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.200695 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-767b7"]
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.229790 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-utilities\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.231808 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-catalog-content\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.231881 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn2b5\" (UniqueName: \"kubernetes.io/projected/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-kube-api-access-tn2b5\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.334345 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-catalog-content\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.334403 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn2b5\" (UniqueName: \"kubernetes.io/projected/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-kube-api-access-tn2b5\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.334448 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-utilities\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.335080 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-catalog-content\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.335120 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-utilities\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.363541 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn2b5\" (UniqueName: \"kubernetes.io/projected/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-kube-api-access-tn2b5\") pod \"redhat-marketplace-767b7\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.513834 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-767b7"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.831445 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kd9gx"]
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.835101 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.845806 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-utilities\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.845892 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph76h\" (UniqueName: \"kubernetes.io/projected/fb7f6824-789e-49ca-b406-a4c83ca01f62-kube-api-access-ph76h\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.845953 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-catalog-content\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.846193 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kd9gx"]
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.891852 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-767b7"]
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.928964 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-767b7" event={"ID":"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac","Type":"ContainerStarted","Data":"934b4e32c8634077ea1fd8e215ac88dc2df073708aec022debeca54df2a0d00c"}
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.947430 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-utilities\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.947503 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph76h\" (UniqueName: \"kubernetes.io/projected/fb7f6824-789e-49ca-b406-a4c83ca01f62-kube-api-access-ph76h\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.947546 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-catalog-content\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.948079 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-utilities\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.948104 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-catalog-content\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:34 crc kubenswrapper[4716]: I1209 15:23:34.969271 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph76h\" (UniqueName: \"kubernetes.io/projected/fb7f6824-789e-49ca-b406-a4c83ca01f62-kube-api-access-ph76h\") pod \"certified-operators-kd9gx\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:35 crc kubenswrapper[4716]: I1209 15:23:35.160494 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kd9gx"
Dec 09 15:23:35 crc kubenswrapper[4716]: I1209 15:23:35.713925 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kd9gx"]
Dec 09 15:23:35 crc kubenswrapper[4716]: I1209 15:23:35.938211 4716 generic.go:334] "Generic (PLEG): container finished" podID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerID="4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175" exitCode=0
Dec 09 15:23:35 crc kubenswrapper[4716]: I1209 15:23:35.938324 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-767b7" event={"ID":"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac","Type":"ContainerDied","Data":"4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175"}
Dec 09 15:23:35 crc kubenswrapper[4716]: I1209 15:23:35.940640 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx" event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerStarted","Data":"1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471"}
Dec 09 15:23:35 crc kubenswrapper[4716]: I1209 15:23:35.940686 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx" event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerStarted","Data":"e3d5f8599a71e11ab34e8bfa2541855fd0c41dc67f9448a187ef8b34f4ba487b"}
Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.342310 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.342374 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.386843 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7bqww"
Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.950232 4716 generic.go:334] "Generic (PLEG): container finished" podID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerID="1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471" exitCode=0
Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.950376 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx"
event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerDied","Data":"1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471"} Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.954434 4716 generic.go:334] "Generic (PLEG): container finished" podID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerID="d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac" exitCode=0 Dec 09 15:23:36 crc kubenswrapper[4716]: I1209 15:23:36.954690 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-767b7" event={"ID":"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac","Type":"ContainerDied","Data":"d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac"} Dec 09 15:23:37 crc kubenswrapper[4716]: I1209 15:23:37.041614 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7bqww" Dec 09 15:23:37 crc kubenswrapper[4716]: I1209 15:23:37.924794 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-shzjb" Dec 09 15:23:38 crc kubenswrapper[4716]: I1209 15:23:38.973531 4716 generic.go:334] "Generic (PLEG): container finished" podID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerID="d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42" exitCode=0 Dec 09 15:23:38 crc kubenswrapper[4716]: I1209 15:23:38.973700 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx" event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerDied","Data":"d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42"} Dec 09 15:23:38 crc kubenswrapper[4716]: I1209 15:23:38.978890 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-767b7" event={"ID":"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac","Type":"ContainerStarted","Data":"488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c"} Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.021243 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-767b7" podStartSLOduration=2.880503559 podStartE2EDuration="5.021208869s" podCreationTimestamp="2025-12-09 15:23:34 +0000 UTC" firstStartedPulling="2025-12-09 15:23:35.940071562 +0000 UTC m=+903.094815550" lastFinishedPulling="2025-12-09 15:23:38.080776872 +0000 UTC m=+905.235520860" observedRunningTime="2025-12-09 15:23:39.01710873 +0000 UTC m=+906.171852718" watchObservedRunningTime="2025-12-09 15:23:39.021208869 +0000 UTC m=+906.175952857" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.176543 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7bqww"] Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.176880 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7bqww" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="registry-server" containerID="cri-o://34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5" gracePeriod=2 Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.222925 4716 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Dec 09 15:23:39 crc 
kubenswrapper[4716]: I1209 15:23:39.222995 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="00f16fe7-88b3-4d0a-ba38-f68a4d340686" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.614702 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bqww" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.748611 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-catalog-content\") pod \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.748767 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqf6p\" (UniqueName: \"kubernetes.io/projected/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-kube-api-access-cqf6p\") pod \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.748835 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-utilities\") pod \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\" (UID: \"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11\") " Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.749568 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-utilities" (OuterVolumeSpecName: "utilities") pod "85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" (UID: "85cb1fd9-d5ae-45f6-93fc-abdddd8acb11"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.755449 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-kube-api-access-cqf6p" (OuterVolumeSpecName: "kube-api-access-cqf6p") pod "85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" (UID: "85cb1fd9-d5ae-45f6-93fc-abdddd8acb11"). InnerVolumeSpecName "kube-api-access-cqf6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.811434 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" (UID: "85cb1fd9-d5ae-45f6-93fc-abdddd8acb11"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.850474 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.850516 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqf6p\" (UniqueName: \"kubernetes.io/projected/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-kube-api-access-cqf6p\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.850529 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.988584 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx" event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerStarted","Data":"6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b"} Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.991100 4716 generic.go:334] "Generic (PLEG): container finished" podID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerID="34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5" exitCode=0 Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.991158 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bqww" Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.991179 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerDied","Data":"34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5"} Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.991220 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bqww" event={"ID":"85cb1fd9-d5ae-45f6-93fc-abdddd8acb11","Type":"ContainerDied","Data":"45097dde79c451ed6cdbc8a88013c97ac988864e613b9dd31632e18b3bb43b10"} Dec 09 15:23:39 crc kubenswrapper[4716]: I1209 15:23:39.991243 4716 scope.go:117] "RemoveContainer" containerID="34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.010081 4716 scope.go:117] "RemoveContainer" containerID="87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.015511 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kd9gx" podStartSLOduration=3.600225569 podStartE2EDuration="6.015484868s" podCreationTimestamp="2025-12-09 15:23:34 +0000 UTC" firstStartedPulling="2025-12-09 15:23:36.952393355 +0000 UTC m=+904.107137343" lastFinishedPulling="2025-12-09 15:23:39.367652654 +0000 UTC m=+906.522396642" observedRunningTime="2025-12-09 15:23:40.010136543 +0000 UTC m=+907.164880551" watchObservedRunningTime="2025-12-09 15:23:40.015484868 +0000 UTC m=+907.170228856" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.036321 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7bqww"] Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.041570 4716 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openshift-marketplace/community-operators-7bqww"] Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.057963 4716 scope.go:117] "RemoveContainer" containerID="a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.084287 4716 scope.go:117] "RemoveContainer" containerID="34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5" Dec 09 15:23:40 crc kubenswrapper[4716]: E1209 15:23:40.085056 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5\": container with ID starting with 34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5 not found: ID does not exist" containerID="34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.085140 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5"} err="failed to get container status \"34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5\": rpc error: code = NotFound desc = could not find container \"34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5\": container with ID starting with 34b3a9801ac242ee3b753d75fdf26b0e8c0dfd20e9203114c4712f9e114d9ce5 not found: ID does not exist" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.085191 4716 scope.go:117] "RemoveContainer" containerID="87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e" Dec 09 15:23:40 crc kubenswrapper[4716]: E1209 15:23:40.085740 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e\": container with ID starting with 87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e not found: ID does not exist" containerID="87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.085775 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e"} err="failed to get container status \"87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e\": rpc error: code = NotFound desc = could not find container \"87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e\": container with ID starting with 87bcd2005b7783179c868fc752b06910a13c8dea7518b2ecd9ab9612deef7a5e not found: ID does not exist" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.085803 4716 scope.go:117] "RemoveContainer" containerID="a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3" Dec 09 15:23:40 crc kubenswrapper[4716]: E1209 15:23:40.086135 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3\": container with ID starting with a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3 not found: ID does not exist" containerID="a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3" Dec 09 15:23:40 crc kubenswrapper[4716]: I1209 15:23:40.086185 4716 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3"} err="failed to get container status \"a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3\": rpc error: code = NotFound desc = could not find container \"a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3\": container with ID starting with a4d70b68c80fa07167f6c810772b578d58d6f0261a69c998ac3173878dd9b4c3 not found: ID does not exist" Dec 09 15:23:41 crc kubenswrapper[4716]: I1209 15:23:41.225726 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" path="/var/lib/kubelet/pods/85cb1fd9-d5ae-45f6-93fc-abdddd8acb11/volumes" Dec 09 15:23:44 crc kubenswrapper[4716]: I1209 15:23:44.515066 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-767b7" Dec 09 15:23:44 crc kubenswrapper[4716]: I1209 15:23:44.515899 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-767b7" Dec 09 15:23:44 crc kubenswrapper[4716]: I1209 15:23:44.570327 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-767b7" Dec 09 15:23:45 crc kubenswrapper[4716]: I1209 15:23:45.087498 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-767b7" Dec 09 15:23:45 crc kubenswrapper[4716]: I1209 15:23:45.134529 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-767b7"] Dec 09 15:23:45 crc kubenswrapper[4716]: I1209 15:23:45.161001 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kd9gx" Dec 09 15:23:45 crc kubenswrapper[4716]: I1209 15:23:45.161462 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kd9gx" Dec 09 15:23:45 crc kubenswrapper[4716]: I1209 15:23:45.247304 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kd9gx" Dec 09 15:23:46 crc kubenswrapper[4716]: I1209 15:23:46.093456 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kd9gx" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.055475 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-767b7" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="registry-server" containerID="cri-o://488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c" gracePeriod=2 Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.180182 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kd9gx"] Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.483447 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-767b7" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.597799 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-utilities\") pod \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.597897 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tn2b5\" (UniqueName: \"kubernetes.io/projected/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-kube-api-access-tn2b5\") pod \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.598105 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-catalog-content\") pod \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\" (UID: \"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac\") " Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.598947 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-utilities" (OuterVolumeSpecName: "utilities") pod "15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" (UID: "15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.608669 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-kube-api-access-tn2b5" (OuterVolumeSpecName: "kube-api-access-tn2b5") pod "15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" (UID: "15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac"). InnerVolumeSpecName "kube-api-access-tn2b5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.622131 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" (UID: "15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.700254 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.700756 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:47 crc kubenswrapper[4716]: I1209 15:23:47.700768 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tn2b5\" (UniqueName: \"kubernetes.io/projected/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac-kube-api-access-tn2b5\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.067232 4716 generic.go:334] "Generic (PLEG): container finished" podID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerID="488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c" exitCode=0 Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.067341 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-767b7" event={"ID":"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac","Type":"ContainerDied","Data":"488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c"} Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.067369 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-767b7" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.067398 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-767b7" event={"ID":"15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac","Type":"ContainerDied","Data":"934b4e32c8634077ea1fd8e215ac88dc2df073708aec022debeca54df2a0d00c"} Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.067424 4716 scope.go:117] "RemoveContainer" containerID="488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.101728 4716 scope.go:117] "RemoveContainer" containerID="d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.164776 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-767b7"] Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.172901 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-767b7"] Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.177236 4716 scope.go:117] "RemoveContainer" containerID="4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.214877 4716 scope.go:117] "RemoveContainer" containerID="488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c" Dec 09 15:23:48 crc kubenswrapper[4716]: E1209 15:23:48.221990 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c\": container with ID starting with 488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c not found: ID does not exist" containerID="488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.222060 4716 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c"} err="failed to get container status \"488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c\": rpc error: code = NotFound desc = could not find container \"488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c\": container with ID starting with 488380d58b6725c50fc82fb6bac14d415ea3759b11f5fba2d2bb4c5e1728e44c not found: ID does not exist" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.222102 4716 scope.go:117] "RemoveContainer" containerID="d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac" Dec 09 15:23:48 crc kubenswrapper[4716]: E1209 15:23:48.225752 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac\": container with ID starting with d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac not found: ID does not exist" containerID="d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.225790 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac"} err="failed to get container status \"d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac\": rpc error: code = NotFound desc = could not find container \"d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac\": container with ID starting with d24426bc1481b8fd150b9f62190de54e4463b8de149857504e4ab9be050770ac not found: ID does not exist" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.225818 4716 scope.go:117] "RemoveContainer" containerID="4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175" Dec 09 15:23:48 crc kubenswrapper[4716]: E1209 15:23:48.229754 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175\": container with ID starting with 4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175 not found: ID does not exist" containerID="4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175" Dec 09 15:23:48 crc kubenswrapper[4716]: I1209 15:23:48.229801 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175"} err="failed to get container status \"4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175\": rpc error: code = NotFound desc = could not find container \"4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175\": container with ID starting with 4064482d401111cbec7d1f3fa6096ab14d061aaa0cdb8881836d760b36890175 not found: ID does not exist" Dec 09 15:23:49 crc kubenswrapper[4716]: I1209 15:23:49.078868 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kd9gx" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="registry-server" containerID="cri-o://6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b" gracePeriod=2 Dec 09 15:23:49 crc kubenswrapper[4716]: I1209 15:23:49.226559 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" 
path="/var/lib/kubelet/pods/15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac/volumes" Dec 09 15:23:49 crc kubenswrapper[4716]: I1209 15:23:49.228603 4716 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 09 15:23:49 crc kubenswrapper[4716]: I1209 15:23:49.228818 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="00f16fe7-88b3-4d0a-ba38-f68a4d340686" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.030548 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kd9gx" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.057120 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph76h\" (UniqueName: \"kubernetes.io/projected/fb7f6824-789e-49ca-b406-a4c83ca01f62-kube-api-access-ph76h\") pod \"fb7f6824-789e-49ca-b406-a4c83ca01f62\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.057190 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-utilities\") pod \"fb7f6824-789e-49ca-b406-a4c83ca01f62\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.057242 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-catalog-content\") pod \"fb7f6824-789e-49ca-b406-a4c83ca01f62\" (UID: \"fb7f6824-789e-49ca-b406-a4c83ca01f62\") " Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.058814 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-utilities" (OuterVolumeSpecName: "utilities") pod "fb7f6824-789e-49ca-b406-a4c83ca01f62" (UID: "fb7f6824-789e-49ca-b406-a4c83ca01f62"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.063863 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb7f6824-789e-49ca-b406-a4c83ca01f62-kube-api-access-ph76h" (OuterVolumeSpecName: "kube-api-access-ph76h") pod "fb7f6824-789e-49ca-b406-a4c83ca01f62" (UID: "fb7f6824-789e-49ca-b406-a4c83ca01f62"). InnerVolumeSpecName "kube-api-access-ph76h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.092438 4716 generic.go:334] "Generic (PLEG): container finished" podID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerID="6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b" exitCode=0 Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.092540 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kd9gx" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.092538 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx" event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerDied","Data":"6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b"} Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.092702 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kd9gx" event={"ID":"fb7f6824-789e-49ca-b406-a4c83ca01f62","Type":"ContainerDied","Data":"e3d5f8599a71e11ab34e8bfa2541855fd0c41dc67f9448a187ef8b34f4ba487b"} Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.092732 4716 scope.go:117] "RemoveContainer" containerID="6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.116123 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb7f6824-789e-49ca-b406-a4c83ca01f62" (UID: "fb7f6824-789e-49ca-b406-a4c83ca01f62"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.120635 4716 scope.go:117] "RemoveContainer" containerID="d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.146341 4716 scope.go:117] "RemoveContainer" containerID="1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.158784 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph76h\" (UniqueName: \"kubernetes.io/projected/fb7f6824-789e-49ca-b406-a4c83ca01f62-kube-api-access-ph76h\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.158816 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.158830 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb7f6824-789e-49ca-b406-a4c83ca01f62-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.176258 4716 scope.go:117] "RemoveContainer" containerID="6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b" Dec 09 15:23:50 crc kubenswrapper[4716]: E1209 15:23:50.185417 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b\": container with ID starting with 6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b not found: ID does not exist" containerID="6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.185502 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b"} err="failed to get container status \"6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b\": rpc error: code = NotFound desc = could not find container 
\"6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b\": container with ID starting with 6458f667ebdac11f7b62ad629342f496dce53bdfd401b290364337f781870e5b not found: ID does not exist" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.185550 4716 scope.go:117] "RemoveContainer" containerID="d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42" Dec 09 15:23:50 crc kubenswrapper[4716]: E1209 15:23:50.186030 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42\": container with ID starting with d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42 not found: ID does not exist" containerID="d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.186071 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42"} err="failed to get container status \"d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42\": rpc error: code = NotFound desc = could not find container \"d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42\": container with ID starting with d08a4fbb13ec13e9af5d3ac52e3dca5c6b9ecf03f2857fa2bf93cc78ba4ebe42 not found: ID does not exist" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.186091 4716 scope.go:117] "RemoveContainer" containerID="1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471" Dec 09 15:23:50 crc kubenswrapper[4716]: E1209 15:23:50.186848 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471\": container with ID starting with 1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471 not found: ID does not exist" containerID="1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.186885 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471"} err="failed to get container status \"1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471\": rpc error: code = NotFound desc = could not find container \"1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471\": container with ID starting with 1be484cdcfc6c5059aa0918d2ef18d3346618ec9b9c2bfc82f967ddc11b3e471 not found: ID does not exist" Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.426208 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kd9gx"] Dec 09 15:23:50 crc kubenswrapper[4716]: I1209 15:23:50.433826 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kd9gx"] Dec 09 15:23:51 crc kubenswrapper[4716]: I1209 15:23:51.224474 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" path="/var/lib/kubelet/pods/fb7f6824-789e-49ca-b406-a4c83ca01f62/volumes" Dec 09 15:23:59 crc kubenswrapper[4716]: I1209 15:23:59.222419 4716 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not 
ready: waiting for 15s after being ready Dec 09 15:23:59 crc kubenswrapper[4716]: I1209 15:23:59.223231 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="00f16fe7-88b3-4d0a-ba38-f68a4d340686" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 09 15:24:09 crc kubenswrapper[4716]: I1209 15:24:09.229234 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.100840 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-hbgfm"] Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102071 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102090 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102108 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="extract-utilities" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102115 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="extract-utilities" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102131 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="extract-utilities" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102142 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="extract-utilities" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102155 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="extract-content" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102161 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="extract-content" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102170 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="extract-content" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102176 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="extract-content" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102185 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="extract-content" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102190 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="extract-content" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102204 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="extract-utilities" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102210 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="extract-utilities" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102218 4716 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102224 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.102234 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102240 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102371 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e39a7e-0f9f-4da2-a1cd-a74b51b7e1ac" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102387 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="85cb1fd9-d5ae-45f6-93fc-abdddd8acb11" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.102396 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb7f6824-789e-49ca-b406-a4c83ca01f62" containerName="registry-server" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.103193 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.108251 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.108697 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.108844 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-mmhwg" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.110103 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.113292 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.113769 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124094 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-trusted-ca\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124160 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwx27\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-kube-api-access-qwx27\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124194 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: 
\"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124216 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-entrypoint\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124274 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124318 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-sa-token\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124348 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1e714d09-0665-49a4-8275-61a16c9f5eeb-tmp\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124381 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-syslog-receiver\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124409 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config-openshift-service-cacrt\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124441 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1e714d09-0665-49a4-8275-61a16c9f5eeb-datadir\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.124467 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-token\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.142212 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-hbgfm"] Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.209144 4716 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-logging/collector-hbgfm"] Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.210171 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-qwx27 metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-hbgfm" podUID="1e714d09-0665-49a4-8275-61a16c9f5eeb" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.227585 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228006 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-sa-token\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228112 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1e714d09-0665-49a4-8275-61a16c9f5eeb-tmp\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228209 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-syslog-receiver\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228317 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config-openshift-service-cacrt\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228405 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1e714d09-0665-49a4-8275-61a16c9f5eeb-datadir\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228487 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-token\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228586 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-trusted-ca\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228721 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-qwx27\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-kube-api-access-qwx27\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228864 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.228966 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-entrypoint\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.230442 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-entrypoint\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.230921 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1e714d09-0665-49a4-8275-61a16c9f5eeb-datadir\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.231137 4716 secret.go:188] Couldn't get secret openshift-logging/collector-metrics: secret "collector-metrics" not found Dec 09 15:24:29 crc kubenswrapper[4716]: E1209 15:24:29.231233 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics podName:1e714d09-0665-49a4-8275-61a16c9f5eeb nodeName:}" failed. No retries permitted until 2025-12-09 15:24:29.731203375 +0000 UTC m=+956.885947353 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics") pod "collector-hbgfm" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb") : secret "collector-metrics" not found Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.232255 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config-openshift-service-cacrt\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.232403 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.232577 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-trusted-ca\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.238735 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-token\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.239371 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-syslog-receiver\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.243035 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1e714d09-0665-49a4-8275-61a16c9f5eeb-tmp\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.251145 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwx27\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-kube-api-access-qwx27\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.258179 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-sa-token\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.419805 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.433037 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533649 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-entrypoint\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533739 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533762 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1e714d09-0665-49a4-8275-61a16c9f5eeb-tmp\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533791 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-syslog-receiver\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533912 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-token\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533954 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-trusted-ca\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.533979 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwx27\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-kube-api-access-qwx27\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.534025 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config-openshift-service-cacrt\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.534068 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-sa-token\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.534090 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1e714d09-0665-49a4-8275-61a16c9f5eeb-datadir\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: 
\"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.534693 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e714d09-0665-49a4-8275-61a16c9f5eeb-datadir" (OuterVolumeSpecName: "datadir") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.535260 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.535521 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config" (OuterVolumeSpecName: "config") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.536246 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.536381 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.538787 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e714d09-0665-49a4-8275-61a16c9f5eeb-tmp" (OuterVolumeSpecName: "tmp") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.539300 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.539376 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-kube-api-access-qwx27" (OuterVolumeSpecName: "kube-api-access-qwx27") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "kube-api-access-qwx27". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.541685 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-token" (OuterVolumeSpecName: "collector-token") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.544015 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-sa-token" (OuterVolumeSpecName: "sa-token") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.636940 4716 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.636982 4716 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-sa-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.636993 4716 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/1e714d09-0665-49a4-8275-61a16c9f5eeb-datadir\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637003 4716 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-entrypoint\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637012 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637021 4716 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/1e714d09-0665-49a4-8275-61a16c9f5eeb-tmp\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637035 4716 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637046 4716 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-collector-token\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637055 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1e714d09-0665-49a4-8275-61a16c9f5eeb-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.637063 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwx27\" (UniqueName: \"kubernetes.io/projected/1e714d09-0665-49a4-8275-61a16c9f5eeb-kube-api-access-qwx27\") on 
node \"crc\" DevicePath \"\"" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.737804 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.741561 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics\") pod \"collector-hbgfm\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " pod="openshift-logging/collector-hbgfm" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.839835 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics\") pod \"1e714d09-0665-49a4-8275-61a16c9f5eeb\" (UID: \"1e714d09-0665-49a4-8275-61a16c9f5eeb\") " Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.842708 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics" (OuterVolumeSpecName: "metrics") pod "1e714d09-0665-49a4-8275-61a16c9f5eeb" (UID: "1e714d09-0665-49a4-8275-61a16c9f5eeb"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:24:29 crc kubenswrapper[4716]: I1209 15:24:29.940873 4716 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/1e714d09-0665-49a4-8275-61a16c9f5eeb-metrics\") on node \"crc\" DevicePath \"\"" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.426771 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-hbgfm" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.476128 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-hbgfm"] Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.484002 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-hbgfm"] Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.506086 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-88hn7"] Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.508177 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.513831 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-88hn7"] Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.513843 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.513881 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.514099 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.514312 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.514612 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-mmhwg" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.524513 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.553932 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/367f5f03-2020-4489-95e5-ec09b8e37128-tmp\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.553985 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-collector-token\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554148 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/367f5f03-2020-4489-95e5-ec09b8e37128-sa-token\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554216 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/367f5f03-2020-4489-95e5-ec09b8e37128-datadir\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554255 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-metrics\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554298 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-entrypoint\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc 
kubenswrapper[4716]: I1209 15:24:30.554323 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-trusted-ca\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554340 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-collector-syslog-receiver\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554375 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctpml\" (UniqueName: \"kubernetes.io/projected/367f5f03-2020-4489-95e5-ec09b8e37128-kube-api-access-ctpml\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554402 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-config\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.554425 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-config-openshift-service-cacrt\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656330 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/367f5f03-2020-4489-95e5-ec09b8e37128-tmp\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656396 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-collector-token\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656444 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/367f5f03-2020-4489-95e5-ec09b8e37128-sa-token\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656485 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/367f5f03-2020-4489-95e5-ec09b8e37128-datadir\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656526 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-metrics\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656579 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-entrypoint\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656611 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-trusted-ca\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656662 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-collector-syslog-receiver\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656712 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctpml\" (UniqueName: \"kubernetes.io/projected/367f5f03-2020-4489-95e5-ec09b8e37128-kube-api-access-ctpml\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656748 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-config\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.656778 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-config-openshift-service-cacrt\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.657321 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/367f5f03-2020-4489-95e5-ec09b8e37128-datadir\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.658236 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-trusted-ca\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.658398 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-config\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc 
kubenswrapper[4716]: I1209 15:24:30.658790 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-config-openshift-service-cacrt\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.659217 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/367f5f03-2020-4489-95e5-ec09b8e37128-entrypoint\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.661776 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/367f5f03-2020-4489-95e5-ec09b8e37128-tmp\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.662129 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-collector-syslog-receiver\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.662130 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-metrics\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.662872 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/367f5f03-2020-4489-95e5-ec09b8e37128-collector-token\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.690963 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/367f5f03-2020-4489-95e5-ec09b8e37128-sa-token\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.701368 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctpml\" (UniqueName: \"kubernetes.io/projected/367f5f03-2020-4489-95e5-ec09b8e37128-kube-api-access-ctpml\") pod \"collector-88hn7\" (UID: \"367f5f03-2020-4489-95e5-ec09b8e37128\") " pod="openshift-logging/collector-88hn7" Dec 09 15:24:30 crc kubenswrapper[4716]: I1209 15:24:30.830692 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-88hn7" Dec 09 15:24:31 crc kubenswrapper[4716]: I1209 15:24:31.225376 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e714d09-0665-49a4-8275-61a16c9f5eeb" path="/var/lib/kubelet/pods/1e714d09-0665-49a4-8275-61a16c9f5eeb/volumes" Dec 09 15:24:31 crc kubenswrapper[4716]: I1209 15:24:31.405256 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-88hn7"] Dec 09 15:24:31 crc kubenswrapper[4716]: I1209 15:24:31.435982 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-88hn7" event={"ID":"367f5f03-2020-4489-95e5-ec09b8e37128","Type":"ContainerStarted","Data":"d7ffc5f2a74ef19915495e39edadf61bd7d24a9069b7a985636812763b7e9699"} Dec 09 15:24:39 crc kubenswrapper[4716]: I1209 15:24:39.505815 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-88hn7" event={"ID":"367f5f03-2020-4489-95e5-ec09b8e37128","Type":"ContainerStarted","Data":"20a27f5f64bd3630c26f595d48daa795827a9df3c2bb1e18fc0e776fb7986990"} Dec 09 15:24:39 crc kubenswrapper[4716]: I1209 15:24:39.553732 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-88hn7" podStartSLOduration=2.3252965 podStartE2EDuration="9.553697679s" podCreationTimestamp="2025-12-09 15:24:30 +0000 UTC" firstStartedPulling="2025-12-09 15:24:31.417411252 +0000 UTC m=+958.572155240" lastFinishedPulling="2025-12-09 15:24:38.645812431 +0000 UTC m=+965.800556419" observedRunningTime="2025-12-09 15:24:39.546702348 +0000 UTC m=+966.701446336" watchObservedRunningTime="2025-12-09 15:24:39.553697679 +0000 UTC m=+966.708441657" Dec 09 15:25:11 crc kubenswrapper[4716]: I1209 15:25:11.817064 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq"] Dec 09 15:25:11 crc kubenswrapper[4716]: I1209 15:25:11.819506 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:11 crc kubenswrapper[4716]: I1209 15:25:11.821664 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 09 15:25:11 crc kubenswrapper[4716]: I1209 15:25:11.829317 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq"] Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.010793 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr9wc\" (UniqueName: \"kubernetes.io/projected/63c5a169-7a26-45fb-90ad-a3d2d43516fd-kube-api-access-fr9wc\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.010924 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.010995 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.112311 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.112416 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.112490 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr9wc\" (UniqueName: \"kubernetes.io/projected/63c5a169-7a26-45fb-90ad-a3d2d43516fd-kube-api-access-fr9wc\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.113056 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.113056 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.135741 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr9wc\" (UniqueName: \"kubernetes.io/projected/63c5a169-7a26-45fb-90ad-a3d2d43516fd-kube-api-access-fr9wc\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.142178 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.578228 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq"] Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.764380 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" event={"ID":"63c5a169-7a26-45fb-90ad-a3d2d43516fd","Type":"ContainerStarted","Data":"d3dea4d76a2881f3dacfcb3a12d1d4362ae24441920ae335f1cdb95ef2ff399a"} Dec 09 15:25:12 crc kubenswrapper[4716]: I1209 15:25:12.764436 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" event={"ID":"63c5a169-7a26-45fb-90ad-a3d2d43516fd","Type":"ContainerStarted","Data":"1673ca67d6cb604bbb0fb46f0516c7d64b2382f7b4cfb51ba5f5bec8b167f4cf"} Dec 09 15:25:13 crc kubenswrapper[4716]: I1209 15:25:13.773873 4716 generic.go:334] "Generic (PLEG): container finished" podID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerID="d3dea4d76a2881f3dacfcb3a12d1d4362ae24441920ae335f1cdb95ef2ff399a" exitCode=0 Dec 09 15:25:13 crc kubenswrapper[4716]: I1209 15:25:13.773942 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" event={"ID":"63c5a169-7a26-45fb-90ad-a3d2d43516fd","Type":"ContainerDied","Data":"d3dea4d76a2881f3dacfcb3a12d1d4362ae24441920ae335f1cdb95ef2ff399a"} Dec 09 15:25:13 crc kubenswrapper[4716]: I1209 15:25:13.776270 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:25:15 crc kubenswrapper[4716]: I1209 15:25:15.798520 4716 generic.go:334] "Generic (PLEG): container finished" podID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerID="929bbadd973d9876b91877cfd5808dde8502d58433978b075ff3a0299612e397" exitCode=0 Dec 09 15:25:15 crc kubenswrapper[4716]: I1209 15:25:15.798561 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" event={"ID":"63c5a169-7a26-45fb-90ad-a3d2d43516fd","Type":"ContainerDied","Data":"929bbadd973d9876b91877cfd5808dde8502d58433978b075ff3a0299612e397"} Dec 09 15:25:16 crc kubenswrapper[4716]: I1209 15:25:16.808703 4716 generic.go:334] "Generic (PLEG): container finished" podID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerID="7324e10eb94c1c2f049e03f3a52068dafe0462446303dcc7e724487e7d4c48e3" exitCode=0 Dec 09 15:25:16 crc kubenswrapper[4716]: I1209 15:25:16.808746 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" event={"ID":"63c5a169-7a26-45fb-90ad-a3d2d43516fd","Type":"ContainerDied","Data":"7324e10eb94c1c2f049e03f3a52068dafe0462446303dcc7e724487e7d4c48e3"} Dec 09 15:25:17 crc kubenswrapper[4716]: I1209 15:25:17.923883 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:25:17 crc kubenswrapper[4716]: I1209 15:25:17.923958 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.113357 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.221314 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-bundle\") pod \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.221478 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fr9wc\" (UniqueName: \"kubernetes.io/projected/63c5a169-7a26-45fb-90ad-a3d2d43516fd-kube-api-access-fr9wc\") pod \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.221507 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-util\") pod \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\" (UID: \"63c5a169-7a26-45fb-90ad-a3d2d43516fd\") " Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.222173 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-bundle" (OuterVolumeSpecName: "bundle") pod "63c5a169-7a26-45fb-90ad-a3d2d43516fd" (UID: "63c5a169-7a26-45fb-90ad-a3d2d43516fd"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.228452 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63c5a169-7a26-45fb-90ad-a3d2d43516fd-kube-api-access-fr9wc" (OuterVolumeSpecName: "kube-api-access-fr9wc") pod "63c5a169-7a26-45fb-90ad-a3d2d43516fd" (UID: "63c5a169-7a26-45fb-90ad-a3d2d43516fd"). InnerVolumeSpecName "kube-api-access-fr9wc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.235962 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-util" (OuterVolumeSpecName: "util") pod "63c5a169-7a26-45fb-90ad-a3d2d43516fd" (UID: "63c5a169-7a26-45fb-90ad-a3d2d43516fd"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.325471 4716 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.325506 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fr9wc\" (UniqueName: \"kubernetes.io/projected/63c5a169-7a26-45fb-90ad-a3d2d43516fd-kube-api-access-fr9wc\") on node \"crc\" DevicePath \"\"" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.325518 4716 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/63c5a169-7a26-45fb-90ad-a3d2d43516fd-util\") on node \"crc\" DevicePath \"\"" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.823832 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" event={"ID":"63c5a169-7a26-45fb-90ad-a3d2d43516fd","Type":"ContainerDied","Data":"1673ca67d6cb604bbb0fb46f0516c7d64b2382f7b4cfb51ba5f5bec8b167f4cf"} Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.824187 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1673ca67d6cb604bbb0fb46f0516c7d64b2382f7b4cfb51ba5f5bec8b167f4cf" Dec 09 15:25:18 crc kubenswrapper[4716]: I1209 15:25:18.823912 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.729792 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv"] Dec 09 15:25:23 crc kubenswrapper[4716]: E1209 15:25:23.732385 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="util" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.732485 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="util" Dec 09 15:25:23 crc kubenswrapper[4716]: E1209 15:25:23.732561 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="extract" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.732614 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="extract" Dec 09 15:25:23 crc kubenswrapper[4716]: E1209 15:25:23.732706 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="pull" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.732766 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="pull" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.732982 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="63c5a169-7a26-45fb-90ad-a3d2d43516fd" containerName="extract" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.733986 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.737086 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.737155 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.737561 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-c2xd5" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.737945 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv"] Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.815705 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmjpw\" (UniqueName: \"kubernetes.io/projected/07c2807c-d231-400f-aef5-4a71fc98749b-kube-api-access-cmjpw\") pod \"nmstate-operator-5b5b58f5c8-kldvv\" (UID: \"07c2807c-d231-400f-aef5-4a71fc98749b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.932046 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmjpw\" (UniqueName: \"kubernetes.io/projected/07c2807c-d231-400f-aef5-4a71fc98749b-kube-api-access-cmjpw\") pod \"nmstate-operator-5b5b58f5c8-kldvv\" (UID: \"07c2807c-d231-400f-aef5-4a71fc98749b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" Dec 09 15:25:23 crc kubenswrapper[4716]: I1209 15:25:23.974361 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmjpw\" 
(UniqueName: \"kubernetes.io/projected/07c2807c-d231-400f-aef5-4a71fc98749b-kube-api-access-cmjpw\") pod \"nmstate-operator-5b5b58f5c8-kldvv\" (UID: \"07c2807c-d231-400f-aef5-4a71fc98749b\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" Dec 09 15:25:24 crc kubenswrapper[4716]: I1209 15:25:24.061468 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" Dec 09 15:25:24 crc kubenswrapper[4716]: I1209 15:25:24.550364 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv"] Dec 09 15:25:24 crc kubenswrapper[4716]: I1209 15:25:24.887866 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" event={"ID":"07c2807c-d231-400f-aef5-4a71fc98749b","Type":"ContainerStarted","Data":"4ce16b509d7cd41d6d1a253688f0c27e3bb4852b970ee1a7c5238b0a7c7ff661"} Dec 09 15:25:27 crc kubenswrapper[4716]: I1209 15:25:27.909908 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" event={"ID":"07c2807c-d231-400f-aef5-4a71fc98749b","Type":"ContainerStarted","Data":"51571167ffcc8ce5265ad76ff826a8a33170dcead7b4b7e827a49b389989e567"} Dec 09 15:25:27 crc kubenswrapper[4716]: I1209 15:25:27.943179 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-kldvv" podStartSLOduration=2.470974336 podStartE2EDuration="4.943149277s" podCreationTimestamp="2025-12-09 15:25:23 +0000 UTC" firstStartedPulling="2025-12-09 15:25:24.559053172 +0000 UTC m=+1011.713797160" lastFinishedPulling="2025-12-09 15:25:27.031228113 +0000 UTC m=+1014.185972101" observedRunningTime="2025-12-09 15:25:27.933396027 +0000 UTC m=+1015.088140015" watchObservedRunningTime="2025-12-09 15:25:27.943149277 +0000 UTC m=+1015.097893285" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.724178 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.726045 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.729741 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-22dd7" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.733674 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.734655 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.736452 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.739729 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.749382 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.766748 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjn6h\" (UniqueName: \"kubernetes.io/projected/71d69185-6b38-4b98-9699-40641b4ce638-kube-api-access-fjn6h\") pod \"nmstate-metrics-7f946cbc9-qgx2m\" (UID: \"71d69185-6b38-4b98-9699-40641b4ce638\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.766929 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.767017 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6957v\" (UniqueName: \"kubernetes.io/projected/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-kube-api-access-6957v\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.781495 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-8t6l4"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.782601 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869008 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-nmstate-lock\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869104 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-ovs-socket\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869147 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869194 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dp2d\" (UniqueName: \"kubernetes.io/projected/6456c272-4118-4d6d-837a-37e423889025-kube-api-access-8dp2d\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869287 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6957v\" (UniqueName: \"kubernetes.io/projected/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-kube-api-access-6957v\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869352 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjn6h\" (UniqueName: \"kubernetes.io/projected/71d69185-6b38-4b98-9699-40641b4ce638-kube-api-access-fjn6h\") pod \"nmstate-metrics-7f946cbc9-qgx2m\" (UID: \"71d69185-6b38-4b98-9699-40641b4ce638\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.869389 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-dbus-socket\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: E1209 15:25:31.869570 4716 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Dec 09 15:25:31 crc kubenswrapper[4716]: E1209 15:25:31.869639 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-tls-key-pair podName:7e2a8951-b8c0-43a4-ad05-ca0d0000d70b nodeName:}" failed. No retries permitted until 2025-12-09 15:25:32.369607451 +0000 UTC m=+1019.524351439 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-w8bpd" (UID: "7e2a8951-b8c0-43a4-ad05-ca0d0000d70b") : secret "openshift-nmstate-webhook" not found Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.893694 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjn6h\" (UniqueName: \"kubernetes.io/projected/71d69185-6b38-4b98-9699-40641b4ce638-kube-api-access-fjn6h\") pod \"nmstate-metrics-7f946cbc9-qgx2m\" (UID: \"71d69185-6b38-4b98-9699-40641b4ce638\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.900393 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.901684 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6957v\" (UniqueName: \"kubernetes.io/projected/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-kube-api-access-6957v\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.901738 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.911467 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-pg8dw" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.912139 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.927027 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.935448 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq"] Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.974904 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-dbus-socket\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.975065 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-nmstate-lock\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.975132 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-ovs-socket\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.975253 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dp2d\" (UniqueName: 
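
The two E1209 records above show kubelet's mount-retry pattern: the failed MountVolume.SetUp is parked in nestedpendingoperations and no retry is permitted for durationBeforeRetry (500ms here); the reconciler then tries again, and the mount eventually succeeds once the openshift-nmstate-webhook secret exists (at 15:25:32.390444 below). The following is a rough, self-contained imitation of that loop; only the 500ms initial delay is taken from the log, while the doubling factor, the cap, and the mountSecretVolume stub are assumptions of this sketch.

package main

import (
	"errors"
	"fmt"
	"time"
)

// mountSecretVolume stands in for MountVolume.SetUp; it fails until the
// referenced Secret exists. Hypothetical helper for this sketch only.
func mountSecretVolume(attempt int) error {
	if attempt < 3 {
		return errors.New(`secret "openshift-nmstate-webhook" not found`)
	}
	return nil
}

func main() {
	delay := 500 * time.Millisecond // matches durationBeforeRetry in the log
	const maxDelay = 2 * time.Minute // cap is an assumption of this sketch
	for attempt := 0; ; attempt++ {
		err := mountSecretVolume(attempt)
		if err == nil {
			fmt.Println("MountVolume.SetUp succeeded")
			return
		}
		fmt.Printf("failed: %v; no retries permitted for %v\n", err, delay)
		time.Sleep(delay)
		if delay *= 2; delay > maxDelay { // exponential growth, bounded
			delay = maxDelay
		}
	}
}
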
\"kubernetes.io/projected/6456c272-4118-4d6d-837a-37e423889025-kube-api-access-8dp2d\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.975582 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-dbus-socket\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.975697 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-nmstate-lock\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:31 crc kubenswrapper[4716]: I1209 15:25:31.975748 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/6456c272-4118-4d6d-837a-37e423889025-ovs-socket\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.001788 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dp2d\" (UniqueName: \"kubernetes.io/projected/6456c272-4118-4d6d-837a-37e423889025-kube-api-access-8dp2d\") pod \"nmstate-handler-8t6l4\" (UID: \"6456c272-4118-4d6d-837a-37e423889025\") " pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.077033 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c3cafcff-7dbc-4599-9fb2-c2931006503d-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.077463 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmtmf\" (UniqueName: \"kubernetes.io/projected/c3cafcff-7dbc-4599-9fb2-c2931006503d-kube-api-access-qmtmf\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.077685 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3cafcff-7dbc-4599-9fb2-c2931006503d-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.091973 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.127025 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-8t6l4" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.169910 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f76b57f8b-ltlwj"] Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.171266 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181102 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-service-ca\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181245 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-oauth-config\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181327 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-config\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181408 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c3cafcff-7dbc-4599-9fb2-c2931006503d-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181492 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-oauth-serving-cert\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181591 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbxqz\" (UniqueName: \"kubernetes.io/projected/f2721f4c-09da-4cd3-a266-8df2c3aca030-kube-api-access-rbxqz\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181732 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmtmf\" (UniqueName: \"kubernetes.io/projected/c3cafcff-7dbc-4599-9fb2-c2931006503d-kube-api-access-qmtmf\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/c3cafcff-7dbc-4599-9fb2-c2931006503d-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181907 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-serving-cert\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.181978 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-trusted-ca-bundle\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.183299 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/c3cafcff-7dbc-4599-9fb2-c2931006503d-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: E1209 15:25:32.183833 4716 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Dec 09 15:25:32 crc kubenswrapper[4716]: E1209 15:25:32.183942 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c3cafcff-7dbc-4599-9fb2-c2931006503d-plugin-serving-cert podName:c3cafcff-7dbc-4599-9fb2-c2931006503d nodeName:}" failed. No retries permitted until 2025-12-09 15:25:32.683917779 +0000 UTC m=+1019.838661837 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/c3cafcff-7dbc-4599-9fb2-c2931006503d-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-52knq" (UID: "c3cafcff-7dbc-4599-9fb2-c2931006503d") : secret "plugin-serving-cert" not found Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.197228 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f76b57f8b-ltlwj"] Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.215920 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmtmf\" (UniqueName: \"kubernetes.io/projected/c3cafcff-7dbc-4599-9fb2-c2931006503d-kube-api-access-qmtmf\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.282974 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbxqz\" (UniqueName: \"kubernetes.io/projected/f2721f4c-09da-4cd3-a266-8df2c3aca030-kube-api-access-rbxqz\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.283131 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-serving-cert\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.283163 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-trusted-ca-bundle\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.283215 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-service-ca\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.283318 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-oauth-config\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.283344 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-config\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.283395 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-oauth-serving-cert\") pod \"console-f76b57f8b-ltlwj\" (UID: 
\"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.286370 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-oauth-serving-cert\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.287160 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-service-ca\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.290319 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-config\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.297527 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-trusted-ca-bundle\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.301025 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-serving-cert\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.303607 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-oauth-config\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.312024 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbxqz\" (UniqueName: \"kubernetes.io/projected/f2721f4c-09da-4cd3-a266-8df2c3aca030-kube-api-access-rbxqz\") pod \"console-f76b57f8b-ltlwj\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") " pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.385045 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.390444 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/7e2a8951-b8c0-43a4-ad05-ca0d0000d70b-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-w8bpd\" (UID: \"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b\") " 
pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.415135 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.560548 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f76b57f8b-ltlwj" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.631113 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m"] Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.692285 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3cafcff-7dbc-4599-9fb2-c2931006503d-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.697358 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/c3cafcff-7dbc-4599-9fb2-c2931006503d-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-52knq\" (UID: \"c3cafcff-7dbc-4599-9fb2-c2931006503d\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.888408 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" Dec 09 15:25:32 crc kubenswrapper[4716]: I1209 15:25:32.909199 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd"] Dec 09 15:25:32 crc kubenswrapper[4716]: W1209 15:25:32.922848 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e2a8951_b8c0_43a4_ad05_ca0d0000d70b.slice/crio-6e2c853c94725e4f0b663492793047aa90815c2029f753997daa3516f861cb8f WatchSource:0}: Error finding container 6e2c853c94725e4f0b663492793047aa90815c2029f753997daa3516f861cb8f: Status 404 returned error can't find the container with id 6e2c853c94725e4f0b663492793047aa90815c2029f753997daa3516f861cb8f Dec 09 15:25:33 crc kubenswrapper[4716]: I1209 15:25:33.007807 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-8t6l4" event={"ID":"6456c272-4118-4d6d-837a-37e423889025","Type":"ContainerStarted","Data":"f9dd926b53955c41452871a21aed4915f2e281396dbcf019f9ddd2555d71e6d7"} Dec 09 15:25:33 crc kubenswrapper[4716]: I1209 15:25:33.010005 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" event={"ID":"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b","Type":"ContainerStarted","Data":"6e2c853c94725e4f0b663492793047aa90815c2029f753997daa3516f861cb8f"} Dec 09 15:25:33 crc kubenswrapper[4716]: I1209 15:25:33.011168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" event={"ID":"71d69185-6b38-4b98-9699-40641b4ce638","Type":"ContainerStarted","Data":"e9a8eb76ef83a21148037b3bb9e1e897e2252691c5e7db1a510b0a02883f0a32"} Dec 09 15:25:33 crc kubenswrapper[4716]: I1209 15:25:33.037429 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f76b57f8b-ltlwj"] Dec 09 15:25:33 crc kubenswrapper[4716]: 
Dec 09 15:25:34 crc kubenswrapper[4716]: I1209 15:25:34.023376 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" event={"ID":"c3cafcff-7dbc-4599-9fb2-c2931006503d","Type":"ContainerStarted","Data":"26b2acea83f57e209da2319f77b019716570b311a32868416ea4dcf787eb242f"}
Dec 09 15:25:34 crc kubenswrapper[4716]: I1209 15:25:34.025440 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f76b57f8b-ltlwj" event={"ID":"f2721f4c-09da-4cd3-a266-8df2c3aca030","Type":"ContainerStarted","Data":"3529d187f9476fc1171f80aae8304a6ec3bd9c2a4c2d383865646d8ece314e67"}
Dec 09 15:25:34 crc kubenswrapper[4716]: I1209 15:25:34.025479 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f76b57f8b-ltlwj" event={"ID":"f2721f4c-09da-4cd3-a266-8df2c3aca030","Type":"ContainerStarted","Data":"73046e778b7911ce8a84efe08993412ec9dcf2b8b2bb767f07ad208ef24e5495"}
Dec 09 15:25:34 crc kubenswrapper[4716]: I1209 15:25:34.050428 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f76b57f8b-ltlwj" podStartSLOduration=2.050411702 podStartE2EDuration="2.050411702s" podCreationTimestamp="2025-12-09 15:25:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:25:34.049874146 +0000 UTC m=+1021.204618134" watchObservedRunningTime="2025-12-09 15:25:34.050411702 +0000 UTC m=+1021.205155690"
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.052503 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-8t6l4" event={"ID":"6456c272-4118-4d6d-837a-37e423889025","Type":"ContainerStarted","Data":"1a051b378853e7ac1440aefe87e78ce9adf200e31de3f1170dc41a49ac25ad2e"}
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.053283 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-8t6l4"
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.055450 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" event={"ID":"c3cafcff-7dbc-4599-9fb2-c2931006503d","Type":"ContainerStarted","Data":"f7b5b2e245faa400319d68995e422f1cec72a1ea750d03cf1ad6a749c41a56fb"}
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.057665 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" event={"ID":"7e2a8951-b8c0-43a4-ad05-ca0d0000d70b","Type":"ContainerStarted","Data":"8c38adda019b65cef6b2593eb7345d142f6425ae02255fc2527f34c6cbe71f6d"}
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.057818 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd"
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.059324 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" event={"ID":"71d69185-6b38-4b98-9699-40641b4ce638","Type":"ContainerStarted","Data":"8b1b966e20ddfa9a573a95ab082e800fea81610491da88a3b2cc0402e11ed365"}
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.078353 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-8t6l4" podStartSLOduration=2.497319334 podStartE2EDuration="6.078327265s" podCreationTimestamp="2025-12-09 15:25:31 +0000 UTC" firstStartedPulling="2025-12-09 15:25:32.247526226 +0000 UTC m=+1019.402270214" lastFinishedPulling="2025-12-09 15:25:35.828534157 +0000 UTC m=+1022.983278145" observedRunningTime="2025-12-09 15:25:37.067989568 +0000 UTC m=+1024.222733556" watchObservedRunningTime="2025-12-09 15:25:37.078327265 +0000 UTC m=+1024.233071253"
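
The numbers in the pod_startup_latency_tracker record above are internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (15:25:37.078327265 - 15:25:31 = 6.078327265s), and podStartSLOduration subtracts the image-pull window (15:25:35.828534157 - 15:25:32.247526226 = 3.581007931s) from that, giving 6.078327265 - 3.581007931 = 2.497319334s. The same arithmetic in Go, using the timestamps quoted in the record:

package main

import (
	"fmt"
	"time"
)

// mustParse reads the timestamp format quoted in the log record.
func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-12-09 15:25:31 +0000 UTC")
	firstPull := mustParse("2025-12-09 15:25:32.247526226 +0000 UTC")
	lastPull := mustParse("2025-12-09 15:25:35.828534157 +0000 UTC")
	running := mustParse("2025-12-09 15:25:37.078327265 +0000 UTC")

	e2e := running.Sub(created)        // podStartE2EDuration: 6.078327265s
	pulling := lastPull.Sub(firstPull) // image-pull window:   3.581007931s
	slo := e2e - pulling               // podStartSLOduration: 2.497319334s
	fmt.Println(e2e, pulling, slo)
}

The console pod's record earlier shows the degenerate case: with no image pull (the zero-value "0001-01-01" pull timestamps), the SLO duration equals the E2E duration.
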
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.094118 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-52knq" podStartSLOduration=3.659250668 podStartE2EDuration="6.094094358s" podCreationTimestamp="2025-12-09 15:25:31 +0000 UTC" firstStartedPulling="2025-12-09 15:25:33.392634457 +0000 UTC m=+1020.547378445" lastFinishedPulling="2025-12-09 15:25:35.827478147 +0000 UTC m=+1022.982222135" observedRunningTime="2025-12-09 15:25:37.089035483 +0000 UTC m=+1024.243779481" watchObservedRunningTime="2025-12-09 15:25:37.094094358 +0000 UTC m=+1024.248838346"
Dec 09 15:25:37 crc kubenswrapper[4716]: I1209 15:25:37.120058 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd" podStartSLOduration=3.18859642 podStartE2EDuration="6.120040723s" podCreationTimestamp="2025-12-09 15:25:31 +0000 UTC" firstStartedPulling="2025-12-09 15:25:32.926272062 +0000 UTC m=+1020.081016050" lastFinishedPulling="2025-12-09 15:25:35.857716365 +0000 UTC m=+1023.012460353" observedRunningTime="2025-12-09 15:25:37.118424657 +0000 UTC m=+1024.273168655" watchObservedRunningTime="2025-12-09 15:25:37.120040723 +0000 UTC m=+1024.274784711"
Dec 09 15:25:40 crc kubenswrapper[4716]: I1209 15:25:40.094488 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" event={"ID":"71d69185-6b38-4b98-9699-40641b4ce638","Type":"ContainerStarted","Data":"d69980a53ab70ef56f385043ac7852895ffa1c61cd0eaf47689627a7c71ad89e"}
Dec 09 15:25:40 crc kubenswrapper[4716]: I1209 15:25:40.114876 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-qgx2m" podStartSLOduration=2.327176548 podStartE2EDuration="9.114849607s" podCreationTimestamp="2025-12-09 15:25:31 +0000 UTC" firstStartedPulling="2025-12-09 15:25:32.643361726 +0000 UTC m=+1019.798105714" lastFinishedPulling="2025-12-09 15:25:39.431034785 +0000 UTC m=+1026.585778773" observedRunningTime="2025-12-09 15:25:40.108518135 +0000 UTC m=+1027.263262153" watchObservedRunningTime="2025-12-09 15:25:40.114849607 +0000 UTC m=+1027.269593615"
Dec 09 15:25:42 crc kubenswrapper[4716]: I1209 15:25:42.159135 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-8t6l4"
Dec 09 15:25:42 crc kubenswrapper[4716]: I1209 15:25:42.561644 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f76b57f8b-ltlwj"
Dec 09 15:25:42 crc kubenswrapper[4716]: I1209 15:25:42.561695 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f76b57f8b-ltlwj"
Dec 09 15:25:42 crc kubenswrapper[4716]: I1209 15:25:42.566734 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f76b57f8b-ltlwj"
Dec 09 15:25:43 crc kubenswrapper[4716]: I1209 15:25:43.143162 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f76b57f8b-ltlwj"
Dec 09 15:25:43 crc kubenswrapper[4716]: I1209 15:25:43.205853 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-76868b5959-ctmcp"]
Dec 09 15:25:47 crc kubenswrapper[4716]: I1209 15:25:47.921967 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:25:47 crc kubenswrapper[4716]: I1209 15:25:47.922531 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:25:52 crc kubenswrapper[4716]: I1209 15:25:52.427271 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-w8bpd"
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.253306 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-76868b5959-ctmcp" podUID="d81edbe4-5c43-454c-b78d-396a1dc373af" containerName="console" containerID="cri-o://56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298" gracePeriod=15
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.832529 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-76868b5959-ctmcp_d81edbe4-5c43-454c-b78d-396a1dc373af/console/0.log"
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.833005 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.998826 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vtg6\" (UniqueName: \"kubernetes.io/projected/d81edbe4-5c43-454c-b78d-396a1dc373af-kube-api-access-5vtg6\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.998876 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-console-config\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.998920 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-oauth-serving-cert\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.999038 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-oauth-config\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.999114 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-serving-cert\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.999135 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-service-ca\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:08 crc kubenswrapper[4716]: I1209 15:26:08.999214 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-trusted-ca-bundle\") pod \"d81edbe4-5c43-454c-b78d-396a1dc373af\" (UID: \"d81edbe4-5c43-454c-b78d-396a1dc373af\") "
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.000257 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.000348 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.000941 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-service-ca" (OuterVolumeSpecName: "service-ca") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.001221 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-console-config" (OuterVolumeSpecName: "console-config") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.010214 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d81edbe4-5c43-454c-b78d-396a1dc373af-kube-api-access-5vtg6" (OuterVolumeSpecName: "kube-api-access-5vtg6") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "kube-api-access-5vtg6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.023638 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.023924 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "d81edbe4-5c43-454c-b78d-396a1dc373af" (UID: "d81edbe4-5c43-454c-b78d-396a1dc373af"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101327 4716 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101362 4716 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d81edbe4-5c43-454c-b78d-396a1dc373af-console-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101374 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-service-ca\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101382 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101393 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vtg6\" (UniqueName: \"kubernetes.io/projected/d81edbe4-5c43-454c-b78d-396a1dc373af-kube-api-access-5vtg6\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101404 4716 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-console-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.101412 4716 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d81edbe4-5c43-454c-b78d-396a1dc373af-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.386316 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-76868b5959-ctmcp_d81edbe4-5c43-454c-b78d-396a1dc373af/console/0.log"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.386789 4716 generic.go:334] "Generic (PLEG): container finished" podID="d81edbe4-5c43-454c-b78d-396a1dc373af" containerID="56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298" exitCode=2
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.386839 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-76868b5959-ctmcp" event={"ID":"d81edbe4-5c43-454c-b78d-396a1dc373af","Type":"ContainerDied","Data":"56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298"}
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.386856 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-76868b5959-ctmcp"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.386877 4716 scope.go:117] "RemoveContainer" containerID="56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.386865 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-76868b5959-ctmcp" event={"ID":"d81edbe4-5c43-454c-b78d-396a1dc373af","Type":"ContainerDied","Data":"10ddef4f25fdbd5cf18a73429066a9f40f191b016003726b9888d796f1c5aac8"}
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.409088 4716 scope.go:117] "RemoveContainer" containerID="56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298"
Dec 09 15:26:09 crc kubenswrapper[4716]: E1209 15:26:09.409512 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298\": container with ID starting with 56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298 not found: ID does not exist" containerID="56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.409550 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298"} err="failed to get container status \"56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298\": rpc error: code = NotFound desc = could not find container \"56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298\": container with ID starting with 56b9298e6de7ff2f0946284b391239362bd9ae73baa2c16bf077f09b52b4d298 not found: ID does not exist"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.416465 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-76868b5959-ctmcp"]
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.431268 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-76868b5959-ctmcp"]
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.441348 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4"]
Dec 09 15:26:09 crc kubenswrapper[4716]: E1209 15:26:09.442074 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d81edbe4-5c43-454c-b78d-396a1dc373af" containerName="console"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.442215 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d81edbe4-5c43-454c-b78d-396a1dc373af" containerName="console"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.442491 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d81edbe4-5c43-454c-b78d-396a1dc373af" containerName="console"
Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.444064 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4"
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.447409 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4"] Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.450126 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.610577 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.610790 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.610969 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrbnx\" (UniqueName: \"kubernetes.io/projected/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-kube-api-access-wrbnx\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.713160 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.713232 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrbnx\" (UniqueName: \"kubernetes.io/projected/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-kube-api-access-wrbnx\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.713292 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.713840 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.713882 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.733585 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrbnx\" (UniqueName: \"kubernetes.io/projected/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-kube-api-access-wrbnx\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:09 crc kubenswrapper[4716]: I1209 15:26:09.769359 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:10 crc kubenswrapper[4716]: I1209 15:26:10.169249 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4"] Dec 09 15:26:10 crc kubenswrapper[4716]: I1209 15:26:10.396692 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" event={"ID":"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4","Type":"ContainerStarted","Data":"9d4e2e036983fd9120f23bd7e8165e0caaafbf17c3842698af917f84637825aa"} Dec 09 15:26:11 crc kubenswrapper[4716]: I1209 15:26:11.222710 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d81edbe4-5c43-454c-b78d-396a1dc373af" path="/var/lib/kubelet/pods/d81edbe4-5c43-454c-b78d-396a1dc373af/volumes" Dec 09 15:26:11 crc kubenswrapper[4716]: I1209 15:26:11.407329 4716 generic.go:334] "Generic (PLEG): container finished" podID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerID="bb64448843d20b83e2b23fe174ce3d37674b6afd394d69edfb071318564491db" exitCode=0 Dec 09 15:26:11 crc kubenswrapper[4716]: I1209 15:26:11.407371 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" event={"ID":"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4","Type":"ContainerDied","Data":"bb64448843d20b83e2b23fe174ce3d37674b6afd394d69edfb071318564491db"} Dec 09 15:26:13 crc kubenswrapper[4716]: I1209 15:26:13.421187 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" event={"ID":"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4","Type":"ContainerStarted","Data":"c288b8d3c10b26cfc7ab7b42e6041867efdf5e38f5b87528c649aa70be436ae0"} Dec 09 15:26:14 crc kubenswrapper[4716]: I1209 15:26:14.428828 4716 generic.go:334] "Generic (PLEG): container finished" podID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerID="c288b8d3c10b26cfc7ab7b42e6041867efdf5e38f5b87528c649aa70be436ae0" exitCode=0 Dec 09 15:26:14 crc kubenswrapper[4716]: I1209 
15:26:14.428882 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" event={"ID":"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4","Type":"ContainerDied","Data":"c288b8d3c10b26cfc7ab7b42e6041867efdf5e38f5b87528c649aa70be436ae0"} Dec 09 15:26:15 crc kubenswrapper[4716]: I1209 15:26:15.437423 4716 generic.go:334] "Generic (PLEG): container finished" podID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerID="d78e9fa9812f5b2c9f90b8703a0fe3f71a8148618b2202295d50ca0500f44f1a" exitCode=0 Dec 09 15:26:15 crc kubenswrapper[4716]: I1209 15:26:15.437497 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" event={"ID":"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4","Type":"ContainerDied","Data":"d78e9fa9812f5b2c9f90b8703a0fe3f71a8148618b2202295d50ca0500f44f1a"} Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.710686 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.838572 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-util\") pod \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.838674 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-bundle\") pod \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.838723 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrbnx\" (UniqueName: \"kubernetes.io/projected/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-kube-api-access-wrbnx\") pod \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\" (UID: \"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4\") " Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.839897 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-bundle" (OuterVolumeSpecName: "bundle") pod "f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" (UID: "f6b71818-3fd7-43c7-9603-cdec6e1fe6b4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.845102 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-kube-api-access-wrbnx" (OuterVolumeSpecName: "kube-api-access-wrbnx") pod "f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" (UID: "f6b71818-3fd7-43c7-9603-cdec6e1fe6b4"). InnerVolumeSpecName "kube-api-access-wrbnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.848920 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-util" (OuterVolumeSpecName: "util") pod "f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" (UID: "f6b71818-3fd7-43c7-9603-cdec6e1fe6b4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.941071 4716 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-util\") on node \"crc\" DevicePath \"\"" Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.941131 4716 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:26:16 crc kubenswrapper[4716]: I1209 15:26:16.941148 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrbnx\" (UniqueName: \"kubernetes.io/projected/f6b71818-3fd7-43c7-9603-cdec6e1fe6b4-kube-api-access-wrbnx\") on node \"crc\" DevicePath \"\"" Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.459114 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" event={"ID":"f6b71818-3fd7-43c7-9603-cdec6e1fe6b4","Type":"ContainerDied","Data":"9d4e2e036983fd9120f23bd7e8165e0caaafbf17c3842698af917f84637825aa"} Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.459181 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d4e2e036983fd9120f23bd7e8165e0caaafbf17c3842698af917f84637825aa" Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.459194 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4" Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.923314 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.924182 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.924291 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.925063 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"34321690e545d4c8bbe42a5bb706305ba7bf7764ef04b070ddeba60a6895e655"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:26:17 crc kubenswrapper[4716]: I1209 15:26:17.925199 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://34321690e545d4c8bbe42a5bb706305ba7bf7764ef04b070ddeba60a6895e655" gracePeriod=600 Dec 09 15:26:18 crc kubenswrapper[4716]: I1209 15:26:18.470205 4716 generic.go:334] "Generic (PLEG): container finished" 
podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="34321690e545d4c8bbe42a5bb706305ba7bf7764ef04b070ddeba60a6895e655" exitCode=0 Dec 09 15:26:18 crc kubenswrapper[4716]: I1209 15:26:18.470291 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"34321690e545d4c8bbe42a5bb706305ba7bf7764ef04b070ddeba60a6895e655"} Dec 09 15:26:18 crc kubenswrapper[4716]: I1209 15:26:18.470596 4716 scope.go:117] "RemoveContainer" containerID="926bc2d973bc4613f2c81809b0dc284f4ef49b03a208dfa2c446ce01a7ee38bc" Dec 09 15:26:19 crc kubenswrapper[4716]: I1209 15:26:19.480437 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1"} Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.067167 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5655658545-cjrmh"] Dec 09 15:26:28 crc kubenswrapper[4716]: E1209 15:26:28.068027 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="pull" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.068043 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="pull" Dec 09 15:26:28 crc kubenswrapper[4716]: E1209 15:26:28.068061 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="extract" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.068069 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="extract" Dec 09 15:26:28 crc kubenswrapper[4716]: E1209 15:26:28.068081 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="util" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.068089 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="util" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.068251 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6b71818-3fd7-43c7-9603-cdec6e1fe6b4" containerName="extract" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.068841 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.072501 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.072565 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.074209 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.074279 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-kqzd5" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.074316 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.090029 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5655658545-cjrmh"] Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.179009 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-webhook-cert\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.179218 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nvjt\" (UniqueName: \"kubernetes.io/projected/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-kube-api-access-5nvjt\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.179347 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-apiservice-cert\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.482902 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-apiservice-cert\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.483042 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-webhook-cert\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.483145 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nvjt\" (UniqueName: \"kubernetes.io/projected/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-kube-api-access-5nvjt\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.497229 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-apiservice-cert\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.522771 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-webhook-cert\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.525577 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nvjt\" (UniqueName: \"kubernetes.io/projected/e79da35c-ab1c-40ac-89b1-4406af3ff5f5-kube-api-access-5nvjt\") pod \"metallb-operator-controller-manager-5655658545-cjrmh\" (UID: \"e79da35c-ab1c-40ac-89b1-4406af3ff5f5\") " pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.631151 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4"] Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.633687 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.637344 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-fs27v" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.637539 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.638268 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.648925 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4"] Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.687265 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjsj2\" (UniqueName: \"kubernetes.io/projected/2a6158db-9b91-48c4-a8fb-610541de1ebe-kube-api-access-mjsj2\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.687337 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a6158db-9b91-48c4-a8fb-610541de1ebe-webhook-cert\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.687663 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a6158db-9b91-48c4-a8fb-610541de1ebe-apiservice-cert\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.690847 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.815123 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjsj2\" (UniqueName: \"kubernetes.io/projected/2a6158db-9b91-48c4-a8fb-610541de1ebe-kube-api-access-mjsj2\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.815208 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a6158db-9b91-48c4-a8fb-610541de1ebe-webhook-cert\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.815327 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a6158db-9b91-48c4-a8fb-610541de1ebe-apiservice-cert\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.832115 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2a6158db-9b91-48c4-a8fb-610541de1ebe-apiservice-cert\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.837485 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2a6158db-9b91-48c4-a8fb-610541de1ebe-webhook-cert\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.846947 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjsj2\" (UniqueName: \"kubernetes.io/projected/2a6158db-9b91-48c4-a8fb-610541de1ebe-kube-api-access-mjsj2\") pod \"metallb-operator-webhook-server-65998d95b4-8s5d4\" (UID: \"2a6158db-9b91-48c4-a8fb-610541de1ebe\") " pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:28 crc kubenswrapper[4716]: I1209 15:26:28.968598 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:30 crc kubenswrapper[4716]: I1209 15:26:30.109325 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5655658545-cjrmh"] Dec 09 15:26:30 crc kubenswrapper[4716]: W1209 15:26:30.115087 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode79da35c_ab1c_40ac_89b1_4406af3ff5f5.slice/crio-dfe60ea7f07d082b37072265ef0698f1258c9b91c69ddd19d85c19dee7b99604 WatchSource:0}: Error finding container dfe60ea7f07d082b37072265ef0698f1258c9b91c69ddd19d85c19dee7b99604: Status 404 returned error can't find the container with id dfe60ea7f07d082b37072265ef0698f1258c9b91c69ddd19d85c19dee7b99604 Dec 09 15:26:30 crc kubenswrapper[4716]: I1209 15:26:30.348863 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4"] Dec 09 15:26:30 crc kubenswrapper[4716]: W1209 15:26:30.364499 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a6158db_9b91_48c4_a8fb_610541de1ebe.slice/crio-2c9de535084141ef984d5a05fcafe66b4c53bea1e0b1c4f048c5de7f8549729f WatchSource:0}: Error finding container 2c9de535084141ef984d5a05fcafe66b4c53bea1e0b1c4f048c5de7f8549729f: Status 404 returned error can't find the container with id 2c9de535084141ef984d5a05fcafe66b4c53bea1e0b1c4f048c5de7f8549729f Dec 09 15:26:31 crc kubenswrapper[4716]: I1209 15:26:31.098702 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" event={"ID":"e79da35c-ab1c-40ac-89b1-4406af3ff5f5","Type":"ContainerStarted","Data":"dfe60ea7f07d082b37072265ef0698f1258c9b91c69ddd19d85c19dee7b99604"} Dec 09 15:26:31 crc kubenswrapper[4716]: I1209 15:26:31.099993 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" event={"ID":"2a6158db-9b91-48c4-a8fb-610541de1ebe","Type":"ContainerStarted","Data":"2c9de535084141ef984d5a05fcafe66b4c53bea1e0b1c4f048c5de7f8549729f"} Dec 09 15:26:36 crc kubenswrapper[4716]: I1209 15:26:36.140731 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" event={"ID":"e79da35c-ab1c-40ac-89b1-4406af3ff5f5","Type":"ContainerStarted","Data":"37dac1c9df54e30fbe8f7a5dc93022f8852970abb7cd3abfe81b0180a41d84bc"} Dec 09 15:26:36 crc kubenswrapper[4716]: I1209 15:26:36.141679 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:26:36 crc kubenswrapper[4716]: I1209 15:26:36.142929 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" event={"ID":"2a6158db-9b91-48c4-a8fb-610541de1ebe","Type":"ContainerStarted","Data":"c4a37bcebb41693d111601b11da4599856b827d25687498863e706a43cd5bde0"} Dec 09 15:26:36 crc kubenswrapper[4716]: I1209 15:26:36.143410 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:26:36 crc kubenswrapper[4716]: I1209 15:26:36.162943 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" 
podStartSLOduration=2.370634373 podStartE2EDuration="8.162923862s" podCreationTimestamp="2025-12-09 15:26:28 +0000 UTC" firstStartedPulling="2025-12-09 15:26:30.118858636 +0000 UTC m=+1077.273602624" lastFinishedPulling="2025-12-09 15:26:35.911148125 +0000 UTC m=+1083.065892113" observedRunningTime="2025-12-09 15:26:36.160744151 +0000 UTC m=+1083.315488159" watchObservedRunningTime="2025-12-09 15:26:36.162923862 +0000 UTC m=+1083.317667850" Dec 09 15:26:36 crc kubenswrapper[4716]: I1209 15:26:36.214600 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" podStartSLOduration=2.646045381 podStartE2EDuration="8.214581017s" podCreationTimestamp="2025-12-09 15:26:28 +0000 UTC" firstStartedPulling="2025-12-09 15:26:30.366389733 +0000 UTC m=+1077.521133721" lastFinishedPulling="2025-12-09 15:26:35.934925369 +0000 UTC m=+1083.089669357" observedRunningTime="2025-12-09 15:26:36.200164308 +0000 UTC m=+1083.354908306" watchObservedRunningTime="2025-12-09 15:26:36.214581017 +0000 UTC m=+1083.369325005" Dec 09 15:26:48 crc kubenswrapper[4716]: I1209 15:26:48.977661 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" Dec 09 15:27:08 crc kubenswrapper[4716]: I1209 15:27:08.693600 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5655658545-cjrmh" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.453609 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-ztmht"] Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.457144 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.459230 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-d7bh9" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.459257 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.459543 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.463785 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v"] Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.464815 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.466912 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.490676 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v"] Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.566484 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-z4t47"] Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.569199 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.574803 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.574843 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-xvgx7" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.574861 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.574806 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.596824 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-8gjtr"] Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.598526 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.601731 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613546 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-startup\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613612 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics-certs\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613679 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqhl6\" (UniqueName: \"kubernetes.io/projected/8d5472cc-28f8-4887-97d5-b428c89a1730-kube-api-access-tqhl6\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613710 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmpxd\" (UniqueName: \"kubernetes.io/projected/e2caed4f-2e2a-4658-8705-49c1b6d66492-kube-api-access-lmpxd\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613726 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8d5472cc-28f8-4887-97d5-b428c89a1730-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613788 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-sockets\") pod 
\"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613807 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-reloader\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613827 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-conf\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.613848 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.621478 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-8gjtr"] Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715509 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c34d08f8-624c-47b0-9373-f96e9f280c74-metrics-certs\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715553 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715576 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb9ct\" (UniqueName: \"kubernetes.io/projected/c34d08f8-624c-47b0-9373-f96e9f280c74-kube-api-access-zb9ct\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715608 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-startup\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics-certs\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715859 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4nmf\" (UniqueName: 
\"kubernetes.io/projected/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-kube-api-access-w4nmf\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715885 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqhl6\" (UniqueName: \"kubernetes.io/projected/8d5472cc-28f8-4887-97d5-b428c89a1730-kube-api-access-tqhl6\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715908 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmpxd\" (UniqueName: \"kubernetes.io/projected/e2caed4f-2e2a-4658-8705-49c1b6d66492-kube-api-access-lmpxd\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715931 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8d5472cc-28f8-4887-97d5-b428c89a1730-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715947 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metrics-certs\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715974 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metallb-excludel2\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.715997 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c34d08f8-624c-47b0-9373-f96e9f280c74-cert\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.716017 4716 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.716076 4716 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.716113 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics-certs podName:e2caed4f-2e2a-4658-8705-49c1b6d66492 nodeName:}" failed. No retries permitted until 2025-12-09 15:27:10.216095037 +0000 UTC m=+1117.370839025 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics-certs") pod "frr-k8s-ztmht" (UID: "e2caed4f-2e2a-4658-8705-49c1b6d66492") : secret "frr-k8s-certs-secret" not found Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.716149 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d5472cc-28f8-4887-97d5-b428c89a1730-cert podName:8d5472cc-28f8-4887-97d5-b428c89a1730 nodeName:}" failed. No retries permitted until 2025-12-09 15:27:10.216131908 +0000 UTC m=+1117.370876006 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8d5472cc-28f8-4887-97d5-b428c89a1730-cert") pod "frr-k8s-webhook-server-7fcb986d4-t252v" (UID: "8d5472cc-28f8-4887-97d5-b428c89a1730") : secret "frr-k8s-webhook-server-cert" not found Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.716317 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-sockets\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.716352 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-reloader\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.716409 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-conf\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.716464 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.716710 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-sockets\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.716828 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-conf\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.717035 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-reloader\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.717045 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: 
\"kubernetes.io/empty-dir/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.717095 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e2caed4f-2e2a-4658-8705-49c1b6d66492-frr-startup\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.735506 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqhl6\" (UniqueName: \"kubernetes.io/projected/8d5472cc-28f8-4887-97d5-b428c89a1730-kube-api-access-tqhl6\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.742139 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmpxd\" (UniqueName: \"kubernetes.io/projected/e2caed4f-2e2a-4658-8705-49c1b6d66492-kube-api-access-lmpxd\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.817849 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4nmf\" (UniqueName: \"kubernetes.io/projected/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-kube-api-access-w4nmf\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.817917 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metrics-certs\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.817948 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metallb-excludel2\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.817971 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c34d08f8-624c-47b0-9373-f96e9f280c74-cert\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.818045 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c34d08f8-624c-47b0-9373-f96e9f280c74-metrics-certs\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.818062 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc 
kubenswrapper[4716]: I1209 15:27:09.818077 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb9ct\" (UniqueName: \"kubernetes.io/projected/c34d08f8-624c-47b0-9373-f96e9f280c74-kube-api-access-zb9ct\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.818197 4716 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.818267 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist podName:59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d nodeName:}" failed. No retries permitted until 2025-12-09 15:27:10.318250282 +0000 UTC m=+1117.472994271 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist") pod "speaker-z4t47" (UID: "59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d") : secret "metallb-memberlist" not found Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.818710 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metallb-excludel2\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.818790 4716 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Dec 09 15:27:09 crc kubenswrapper[4716]: E1209 15:27:09.818831 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metrics-certs podName:59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d nodeName:}" failed. No retries permitted until 2025-12-09 15:27:10.318822269 +0000 UTC m=+1117.473566257 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metrics-certs") pod "speaker-z4t47" (UID: "59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d") : secret "speaker-certs-secret" not found Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.820950 4716 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.821429 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c34d08f8-624c-47b0-9373-f96e9f280c74-metrics-certs\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.832612 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c34d08f8-624c-47b0-9373-f96e9f280c74-cert\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.838179 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4nmf\" (UniqueName: \"kubernetes.io/projected/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-kube-api-access-w4nmf\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.845313 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb9ct\" (UniqueName: \"kubernetes.io/projected/c34d08f8-624c-47b0-9373-f96e9f280c74-kube-api-access-zb9ct\") pod \"controller-f8648f98b-8gjtr\" (UID: \"c34d08f8-624c-47b0-9373-f96e9f280c74\") " pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:09 crc kubenswrapper[4716]: I1209 15:27:09.914092 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.225863 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics-certs\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.226248 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8d5472cc-28f8-4887-97d5-b428c89a1730-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.232385 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2caed4f-2e2a-4658-8705-49c1b6d66492-metrics-certs\") pod \"frr-k8s-ztmht\" (UID: \"e2caed4f-2e2a-4658-8705-49c1b6d66492\") " pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.232494 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8d5472cc-28f8-4887-97d5-b428c89a1730-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-t252v\" (UID: \"8d5472cc-28f8-4887-97d5-b428c89a1730\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.325821 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-8gjtr"] Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.327583 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metrics-certs\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.327795 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:10 crc kubenswrapper[4716]: E1209 15:27:10.327929 4716 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Dec 09 15:27:10 crc kubenswrapper[4716]: E1209 15:27:10.328010 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist podName:59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d nodeName:}" failed. No retries permitted until 2025-12-09 15:27:11.327989391 +0000 UTC m=+1118.482733379 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist") pod "speaker-z4t47" (UID: "59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d") : secret "metallb-memberlist" not found Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.333167 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-metrics-certs\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.382889 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.395815 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.423054 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-8gjtr" event={"ID":"c34d08f8-624c-47b0-9373-f96e9f280c74","Type":"ContainerStarted","Data":"a9d3a805bca520cd83bffa0e7b3b465d11f9127fa23424aa1ec8157c3002f779"} Dec 09 15:27:10 crc kubenswrapper[4716]: I1209 15:27:10.888103 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v"] Dec 09 15:27:10 crc kubenswrapper[4716]: W1209 15:27:10.890962 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d5472cc_28f8_4887_97d5_b428c89a1730.slice/crio-a8a010bab2eb9b72b14f66ba2d86319b8b0dc9b2dac1224b69a2429e674f0c51 WatchSource:0}: Error finding container a8a010bab2eb9b72b14f66ba2d86319b8b0dc9b2dac1224b69a2429e674f0c51: Status 404 returned error can't find the container with id a8a010bab2eb9b72b14f66ba2d86319b8b0dc9b2dac1224b69a2429e674f0c51 Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.351121 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.366803 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d-memberlist\") pod \"speaker-z4t47\" (UID: \"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d\") " pod="metallb-system/speaker-z4t47" Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.386912 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-z4t47" Dec 09 15:27:11 crc kubenswrapper[4716]: W1209 15:27:11.421427 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59f248b3_5ec0_4aef_bee1_1e0bf7ebaa9d.slice/crio-96aea7f977d61e76f876c2b2d40d9310c799c5d4eec049e255cad13fb647c40b WatchSource:0}: Error finding container 96aea7f977d61e76f876c2b2d40d9310c799c5d4eec049e255cad13fb647c40b: Status 404 returned error can't find the container with id 96aea7f977d61e76f876c2b2d40d9310c799c5d4eec049e255cad13fb647c40b Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.432906 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"9a1a81862e0666e0d79bdd8923807b2b7b6a3611f0db3b39f3ba4128360e6c0f"} Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.434040 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-z4t47" event={"ID":"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d","Type":"ContainerStarted","Data":"96aea7f977d61e76f876c2b2d40d9310c799c5d4eec049e255cad13fb647c40b"} Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.435418 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" event={"ID":"8d5472cc-28f8-4887-97d5-b428c89a1730","Type":"ContainerStarted","Data":"a8a010bab2eb9b72b14f66ba2d86319b8b0dc9b2dac1224b69a2429e674f0c51"} Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.437477 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-8gjtr" event={"ID":"c34d08f8-624c-47b0-9373-f96e9f280c74","Type":"ContainerStarted","Data":"4425a31d73286d96b5d11983363689fd8bad129c34f1955a798c25dc67e7083f"} Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.437506 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-8gjtr" event={"ID":"c34d08f8-624c-47b0-9373-f96e9f280c74","Type":"ContainerStarted","Data":"1fcb936d5642fba8a2e5bbb2d4375895b705a7fcc03c0bb0b27db9eb497133e0"} Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.437676 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:11 crc kubenswrapper[4716]: I1209 15:27:11.498087 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-8gjtr" podStartSLOduration=2.498070639 podStartE2EDuration="2.498070639s" podCreationTimestamp="2025-12-09 15:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:27:11.496022891 +0000 UTC m=+1118.650766879" watchObservedRunningTime="2025-12-09 15:27:11.498070639 +0000 UTC m=+1118.652814627" Dec 09 15:27:12 crc kubenswrapper[4716]: I1209 15:27:12.458135 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-z4t47" event={"ID":"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d","Type":"ContainerStarted","Data":"d24198539e11e0e011f11a06d5b7f1c289314047f31dc59635f3c281f53dd1f5"} Dec 09 15:27:12 crc kubenswrapper[4716]: I1209 15:27:12.458528 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-z4t47" Dec 09 15:27:12 crc kubenswrapper[4716]: I1209 15:27:12.458540 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-z4t47" 
event={"ID":"59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d","Type":"ContainerStarted","Data":"f2c297cdd39d26cca1aa7193c3f1d8fa46a9bb878fa7e8ae89d23e4a62a97925"} Dec 09 15:27:13 crc kubenswrapper[4716]: I1209 15:27:13.241061 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-z4t47" podStartSLOduration=4.241039306 podStartE2EDuration="4.241039306s" podCreationTimestamp="2025-12-09 15:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:27:12.489553854 +0000 UTC m=+1119.644297842" watchObservedRunningTime="2025-12-09 15:27:13.241039306 +0000 UTC m=+1120.395783294" Dec 09 15:27:18 crc kubenswrapper[4716]: I1209 15:27:18.511510 4716 generic.go:334] "Generic (PLEG): container finished" podID="e2caed4f-2e2a-4658-8705-49c1b6d66492" containerID="ea2b5c04df58145c3dcef4b147adeccecf68f6fa5aa42c461bf0df8a52d6a0be" exitCode=0 Dec 09 15:27:18 crc kubenswrapper[4716]: I1209 15:27:18.511609 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerDied","Data":"ea2b5c04df58145c3dcef4b147adeccecf68f6fa5aa42c461bf0df8a52d6a0be"} Dec 09 15:27:18 crc kubenswrapper[4716]: I1209 15:27:18.514615 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" event={"ID":"8d5472cc-28f8-4887-97d5-b428c89a1730","Type":"ContainerStarted","Data":"9c3d4986795bb78e1acc5186ff19a783f8ad213ac21c8c7a701e37b9d71afa50"} Dec 09 15:27:18 crc kubenswrapper[4716]: I1209 15:27:18.514787 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:18 crc kubenswrapper[4716]: I1209 15:27:18.566762 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" podStartSLOduration=2.743749103 podStartE2EDuration="9.566732879s" podCreationTimestamp="2025-12-09 15:27:09 +0000 UTC" firstStartedPulling="2025-12-09 15:27:10.894093188 +0000 UTC m=+1118.048837166" lastFinishedPulling="2025-12-09 15:27:17.717076954 +0000 UTC m=+1124.871820942" observedRunningTime="2025-12-09 15:27:18.553983177 +0000 UTC m=+1125.708727165" watchObservedRunningTime="2025-12-09 15:27:18.566732879 +0000 UTC m=+1125.721476887" Dec 09 15:27:19 crc kubenswrapper[4716]: I1209 15:27:19.523004 4716 generic.go:334] "Generic (PLEG): container finished" podID="e2caed4f-2e2a-4658-8705-49c1b6d66492" containerID="9c6d68caf4f9201e56599d24cde4aadca805e6b97a6343e04bf40c869d1c0436" exitCode=0 Dec 09 15:27:19 crc kubenswrapper[4716]: I1209 15:27:19.523104 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerDied","Data":"9c6d68caf4f9201e56599d24cde4aadca805e6b97a6343e04bf40c869d1c0436"} Dec 09 15:27:20 crc kubenswrapper[4716]: I1209 15:27:20.532738 4716 generic.go:334] "Generic (PLEG): container finished" podID="e2caed4f-2e2a-4658-8705-49c1b6d66492" containerID="763f4bf93ddc68503d97ac9ed0df142537d2d1ec339f93be55bf1ebe680c6962" exitCode=0 Dec 09 15:27:20 crc kubenswrapper[4716]: I1209 15:27:20.532820 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerDied","Data":"763f4bf93ddc68503d97ac9ed0df142537d2d1ec339f93be55bf1ebe680c6962"} Dec 
09 15:27:21 crc kubenswrapper[4716]: I1209 15:27:21.391275 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-z4t47" Dec 09 15:27:21 crc kubenswrapper[4716]: I1209 15:27:21.545662 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"f26bcb63b0a0cffffa4cace66d913a37af8cd0ae81b0baf95e8983df5bde1629"} Dec 09 15:27:21 crc kubenswrapper[4716]: I1209 15:27:21.545710 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"4d8c0e4c84032172e068651709f55f3c910d65892f7ab5f600f02c4b6e5dfdaf"} Dec 09 15:27:21 crc kubenswrapper[4716]: I1209 15:27:21.545720 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"3222efd660f1639f61c904c22c7ecb41fda742e887b46e04e82742b3e19fde75"} Dec 09 15:27:21 crc kubenswrapper[4716]: I1209 15:27:21.545729 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"324d77aab64786db2243d428ec007b5c5e185239344b984493e43f95c3398333"} Dec 09 15:27:21 crc kubenswrapper[4716]: I1209 15:27:21.545737 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"f6961dd4a7e9ea482bc53451b99f198fe6071e203b1eafec1987ab29648164f6"} Dec 09 15:27:22 crc kubenswrapper[4716]: I1209 15:27:22.557606 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ztmht" event={"ID":"e2caed4f-2e2a-4658-8705-49c1b6d66492","Type":"ContainerStarted","Data":"62d2c2dfdd82464e826cbdae1599f34292de73cb7a59a61c98b2affbd630705d"} Dec 09 15:27:22 crc kubenswrapper[4716]: I1209 15:27:22.558728 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:22 crc kubenswrapper[4716]: I1209 15:27:22.586868 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-ztmht" podStartSLOduration=6.514498951 podStartE2EDuration="13.586850545s" podCreationTimestamp="2025-12-09 15:27:09 +0000 UTC" firstStartedPulling="2025-12-09 15:27:10.623681563 +0000 UTC m=+1117.778425551" lastFinishedPulling="2025-12-09 15:27:17.696033157 +0000 UTC m=+1124.850777145" observedRunningTime="2025-12-09 15:27:22.583475719 +0000 UTC m=+1129.738219717" watchObservedRunningTime="2025-12-09 15:27:22.586850545 +0000 UTC m=+1129.741594533" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.361108 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-htdm6"] Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.362745 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-htdm6" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.364983 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.367077 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-nzd22" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.367253 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.378421 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-htdm6"] Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.513490 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vr2rq\" (UniqueName: \"kubernetes.io/projected/e4de4221-b1b6-4b7a-a720-04a7c3354558-kube-api-access-vr2rq\") pod \"openstack-operator-index-htdm6\" (UID: \"e4de4221-b1b6-4b7a-a720-04a7c3354558\") " pod="openstack-operators/openstack-operator-index-htdm6" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.614891 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vr2rq\" (UniqueName: \"kubernetes.io/projected/e4de4221-b1b6-4b7a-a720-04a7c3354558-kube-api-access-vr2rq\") pod \"openstack-operator-index-htdm6\" (UID: \"e4de4221-b1b6-4b7a-a720-04a7c3354558\") " pod="openstack-operators/openstack-operator-index-htdm6" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.634747 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vr2rq\" (UniqueName: \"kubernetes.io/projected/e4de4221-b1b6-4b7a-a720-04a7c3354558-kube-api-access-vr2rq\") pod \"openstack-operator-index-htdm6\" (UID: \"e4de4221-b1b6-4b7a-a720-04a7c3354558\") " pod="openstack-operators/openstack-operator-index-htdm6" Dec 09 15:27:24 crc kubenswrapper[4716]: I1209 15:27:24.696591 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-htdm6" Dec 09 15:27:25 crc kubenswrapper[4716]: I1209 15:27:25.123361 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-htdm6"] Dec 09 15:27:25 crc kubenswrapper[4716]: I1209 15:27:25.384174 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:25 crc kubenswrapper[4716]: I1209 15:27:25.423461 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:25 crc kubenswrapper[4716]: I1209 15:27:25.582124 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-htdm6" event={"ID":"e4de4221-b1b6-4b7a-a720-04a7c3354558","Type":"ContainerStarted","Data":"a279287d5de8ee8b8294e6ddfee0912a0be5edbce244bc94241b424cc31e87bd"} Dec 09 15:27:27 crc kubenswrapper[4716]: I1209 15:27:27.737769 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-htdm6"] Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.346352 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-wzhdn"] Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.356382 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-wzhdn" Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.364050 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-wzhdn"] Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.489493 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfmn8\" (UniqueName: \"kubernetes.io/projected/685c966f-2aa9-42bc-8d60-d2ebc9b68b0a-kube-api-access-lfmn8\") pod \"openstack-operator-index-wzhdn\" (UID: \"685c966f-2aa9-42bc-8d60-d2ebc9b68b0a\") " pod="openstack-operators/openstack-operator-index-wzhdn" Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.591432 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfmn8\" (UniqueName: \"kubernetes.io/projected/685c966f-2aa9-42bc-8d60-d2ebc9b68b0a-kube-api-access-lfmn8\") pod \"openstack-operator-index-wzhdn\" (UID: \"685c966f-2aa9-42bc-8d60-d2ebc9b68b0a\") " pod="openstack-operators/openstack-operator-index-wzhdn" Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.614784 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfmn8\" (UniqueName: \"kubernetes.io/projected/685c966f-2aa9-42bc-8d60-d2ebc9b68b0a-kube-api-access-lfmn8\") pod \"openstack-operator-index-wzhdn\" (UID: \"685c966f-2aa9-42bc-8d60-d2ebc9b68b0a\") " pod="openstack-operators/openstack-operator-index-wzhdn" Dec 09 15:27:28 crc kubenswrapper[4716]: I1209 15:27:28.688839 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-wzhdn" Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.359817 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-wzhdn"] Dec 09 15:27:29 crc kubenswrapper[4716]: W1209 15:27:29.373174 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod685c966f_2aa9_42bc_8d60_d2ebc9b68b0a.slice/crio-fe43bddde0960594c87ae833c89c9e98eda21e74740fa67412f3c43d88efc03a WatchSource:0}: Error finding container fe43bddde0960594c87ae833c89c9e98eda21e74740fa67412f3c43d88efc03a: Status 404 returned error can't find the container with id fe43bddde0960594c87ae833c89c9e98eda21e74740fa67412f3c43d88efc03a Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.650969 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wzhdn" event={"ID":"685c966f-2aa9-42bc-8d60-d2ebc9b68b0a","Type":"ContainerStarted","Data":"32ec82a233bf2dbdd1b6015749d7b9e5d53e8e6ab8b97bb5b8affd156c8e494b"} Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.651268 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-wzhdn" event={"ID":"685c966f-2aa9-42bc-8d60-d2ebc9b68b0a","Type":"ContainerStarted","Data":"fe43bddde0960594c87ae833c89c9e98eda21e74740fa67412f3c43d88efc03a"} Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.652930 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-htdm6" event={"ID":"e4de4221-b1b6-4b7a-a720-04a7c3354558","Type":"ContainerStarted","Data":"4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5"} Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.653099 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-htdm6" podUID="e4de4221-b1b6-4b7a-a720-04a7c3354558" containerName="registry-server" containerID="cri-o://4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5" gracePeriod=2 Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.672679 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-wzhdn" podStartSLOduration=1.610907397 podStartE2EDuration="1.672655438s" podCreationTimestamp="2025-12-09 15:27:28 +0000 UTC" firstStartedPulling="2025-12-09 15:27:29.377850191 +0000 UTC m=+1136.532594179" lastFinishedPulling="2025-12-09 15:27:29.439598232 +0000 UTC m=+1136.594342220" observedRunningTime="2025-12-09 15:27:29.664840366 +0000 UTC m=+1136.819584344" watchObservedRunningTime="2025-12-09 15:27:29.672655438 +0000 UTC m=+1136.827399436" Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.684367 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-htdm6" podStartSLOduration=1.843529748 podStartE2EDuration="5.684350379s" podCreationTimestamp="2025-12-09 15:27:24 +0000 UTC" firstStartedPulling="2025-12-09 15:27:25.132425221 +0000 UTC m=+1132.287169209" lastFinishedPulling="2025-12-09 15:27:28.973245852 +0000 UTC m=+1136.127989840" observedRunningTime="2025-12-09 15:27:29.679284336 +0000 UTC m=+1136.834028324" watchObservedRunningTime="2025-12-09 15:27:29.684350379 +0000 UTC m=+1136.839094367" Dec 09 15:27:29 crc kubenswrapper[4716]: I1209 15:27:29.920655 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="metallb-system/controller-f8648f98b-8gjtr" Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.123986 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-htdm6" Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.220637 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vr2rq\" (UniqueName: \"kubernetes.io/projected/e4de4221-b1b6-4b7a-a720-04a7c3354558-kube-api-access-vr2rq\") pod \"e4de4221-b1b6-4b7a-a720-04a7c3354558\" (UID: \"e4de4221-b1b6-4b7a-a720-04a7c3354558\") " Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.226979 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4de4221-b1b6-4b7a-a720-04a7c3354558-kube-api-access-vr2rq" (OuterVolumeSpecName: "kube-api-access-vr2rq") pod "e4de4221-b1b6-4b7a-a720-04a7c3354558" (UID: "e4de4221-b1b6-4b7a-a720-04a7c3354558"). InnerVolumeSpecName "kube-api-access-vr2rq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.328525 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vr2rq\" (UniqueName: \"kubernetes.io/projected/e4de4221-b1b6-4b7a-a720-04a7c3354558-kube-api-access-vr2rq\") on node \"crc\" DevicePath \"\"" Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.387062 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-ztmht" Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.401024 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-t252v" Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.661791 4716 generic.go:334] "Generic (PLEG): container finished" podID="e4de4221-b1b6-4b7a-a720-04a7c3354558" containerID="4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5" exitCode=0 Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.661863 4716 util.go:48] "No ready sandbox for pod can be found. 
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.661872 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-htdm6" event={"ID":"e4de4221-b1b6-4b7a-a720-04a7c3354558","Type":"ContainerDied","Data":"4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5"}
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.662347 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-htdm6" event={"ID":"e4de4221-b1b6-4b7a-a720-04a7c3354558","Type":"ContainerDied","Data":"a279287d5de8ee8b8294e6ddfee0912a0be5edbce244bc94241b424cc31e87bd"}
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.662378 4716 scope.go:117] "RemoveContainer" containerID="4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5"
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.682882 4716 scope.go:117] "RemoveContainer" containerID="4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5"
Dec 09 15:27:30 crc kubenswrapper[4716]: E1209 15:27:30.683366 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5\": container with ID starting with 4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5 not found: ID does not exist" containerID="4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5"
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.683497 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5"} err="failed to get container status \"4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5\": rpc error: code = NotFound desc = could not find container \"4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5\": container with ID starting with 4a6852b7f908797231a32747dcc88dd2edcf1229dd960267463fa3aefa791ed5 not found: ID does not exist"
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.701274 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-htdm6"]
Dec 09 15:27:30 crc kubenswrapper[4716]: I1209 15:27:30.704585 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-htdm6"]
Dec 09 15:27:31 crc kubenswrapper[4716]: I1209 15:27:31.224313 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4de4221-b1b6-4b7a-a720-04a7c3354558" path="/var/lib/kubelet/pods/e4de4221-b1b6-4b7a-a720-04a7c3354558/volumes"
Dec 09 15:27:38 crc kubenswrapper[4716]: I1209 15:27:38.689474 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-wzhdn"
Dec 09 15:27:38 crc kubenswrapper[4716]: I1209 15:27:38.689725 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-wzhdn"
Dec 09 15:27:38 crc kubenswrapper[4716]: I1209 15:27:38.715270 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-wzhdn"
Dec 09 15:27:38 crc kubenswrapper[4716]: I1209 15:27:38.748161 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-wzhdn"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.154870 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"]
Dec 09 15:27:44 crc kubenswrapper[4716]: E1209 15:27:44.155741 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4de4221-b1b6-4b7a-a720-04a7c3354558" containerName="registry-server"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.155755 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4de4221-b1b6-4b7a-a720-04a7c3354558" containerName="registry-server"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.155956 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4de4221-b1b6-4b7a-a720-04a7c3354558" containerName="registry-server"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.157319 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.159908 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-l8t2l"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.172078 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"]
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.251039 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-bundle\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.251564 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-util\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.251643 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4krvb\" (UniqueName: \"kubernetes.io/projected/4d0cce9d-1fc7-4f60-9651-e25306d13944-kube-api-access-4krvb\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.353809 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4krvb\" (UniqueName: \"kubernetes.io/projected/4d0cce9d-1fc7-4f60-9651-e25306d13944-kube-api-access-4krvb\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.353901 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-bundle\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.354467 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-bundle\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.354681 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-util\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.355001 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-util\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.374784 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4krvb\" (UniqueName: \"kubernetes.io/projected/4d0cce9d-1fc7-4f60-9651-e25306d13944-kube-api-access-4krvb\") pod \"09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") " pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.478976 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:44 crc kubenswrapper[4716]: I1209 15:27:44.963059 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"]
Dec 09 15:27:46 crc kubenswrapper[4716]: I1209 15:27:45.794760 4716 generic.go:334] "Generic (PLEG): container finished" podID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerID="a3d626be3423849226f0b68557867ff055f3e675666b872961d64a2efc152d06" exitCode=0
Dec 09 15:27:46 crc kubenswrapper[4716]: I1209 15:27:45.795047 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v" event={"ID":"4d0cce9d-1fc7-4f60-9651-e25306d13944","Type":"ContainerDied","Data":"a3d626be3423849226f0b68557867ff055f3e675666b872961d64a2efc152d06"}
Dec 09 15:27:46 crc kubenswrapper[4716]: I1209 15:27:45.795077 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v" event={"ID":"4d0cce9d-1fc7-4f60-9651-e25306d13944","Type":"ContainerStarted","Data":"2a388e4b623f88406ecbe074f9acbc8ad63b5a134ff427f87e4a85cb96b64078"}
Dec 09 15:27:46 crc kubenswrapper[4716]: I1209 15:27:46.806443 4716 generic.go:334] "Generic (PLEG): container finished" podID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerID="c0aaa08e6691f9c1fd8a6ed5d566f219aab3dc837269f1c0763593c2132364d3" exitCode=0
Dec 09 15:27:46 crc kubenswrapper[4716]: I1209 15:27:46.806528 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v" event={"ID":"4d0cce9d-1fc7-4f60-9651-e25306d13944","Type":"ContainerDied","Data":"c0aaa08e6691f9c1fd8a6ed5d566f219aab3dc837269f1c0763593c2132364d3"}
Dec 09 15:27:47 crc kubenswrapper[4716]: I1209 15:27:47.817741 4716 generic.go:334] "Generic (PLEG): container finished" podID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerID="53e5b73d35cd114289120809a89ccacb46f2a33143281d3ffecefa649c0caa5a" exitCode=0
Dec 09 15:27:47 crc kubenswrapper[4716]: I1209 15:27:47.817803 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v" event={"ID":"4d0cce9d-1fc7-4f60-9651-e25306d13944","Type":"ContainerDied","Data":"53e5b73d35cd114289120809a89ccacb46f2a33143281d3ffecefa649c0caa5a"}
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.114556 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.173526 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-util\") pod \"4d0cce9d-1fc7-4f60-9651-e25306d13944\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") "
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.173591 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4krvb\" (UniqueName: \"kubernetes.io/projected/4d0cce9d-1fc7-4f60-9651-e25306d13944-kube-api-access-4krvb\") pod \"4d0cce9d-1fc7-4f60-9651-e25306d13944\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") "
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.173734 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-bundle\") pod \"4d0cce9d-1fc7-4f60-9651-e25306d13944\" (UID: \"4d0cce9d-1fc7-4f60-9651-e25306d13944\") "
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.174555 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-bundle" (OuterVolumeSpecName: "bundle") pod "4d0cce9d-1fc7-4f60-9651-e25306d13944" (UID: "4d0cce9d-1fc7-4f60-9651-e25306d13944"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.174696 4716 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.182789 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d0cce9d-1fc7-4f60-9651-e25306d13944-kube-api-access-4krvb" (OuterVolumeSpecName: "kube-api-access-4krvb") pod "4d0cce9d-1fc7-4f60-9651-e25306d13944" (UID: "4d0cce9d-1fc7-4f60-9651-e25306d13944"). InnerVolumeSpecName "kube-api-access-4krvb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.194538 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-util" (OuterVolumeSpecName: "util") pod "4d0cce9d-1fc7-4f60-9651-e25306d13944" (UID: "4d0cce9d-1fc7-4f60-9651-e25306d13944"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.276223 4716 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4d0cce9d-1fc7-4f60-9651-e25306d13944-util\") on node \"crc\" DevicePath \"\""
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.276560 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4krvb\" (UniqueName: \"kubernetes.io/projected/4d0cce9d-1fc7-4f60-9651-e25306d13944-kube-api-access-4krvb\") on node \"crc\" DevicePath \"\""
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.835817 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v" event={"ID":"4d0cce9d-1fc7-4f60-9651-e25306d13944","Type":"ContainerDied","Data":"2a388e4b623f88406ecbe074f9acbc8ad63b5a134ff427f87e4a85cb96b64078"}
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.835895 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a388e4b623f88406ecbe074f9acbc8ad63b5a134ff427f87e4a85cb96b64078"
Dec 09 15:27:49 crc kubenswrapper[4716]: I1209 15:27:49.835936 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.704703 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"]
Dec 09 15:27:56 crc kubenswrapper[4716]: E1209 15:27:56.705711 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="pull"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.705731 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="pull"
Dec 09 15:27:56 crc kubenswrapper[4716]: E1209 15:27:56.705747 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="util"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.705755 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="util"
Dec 09 15:27:56 crc kubenswrapper[4716]: E1209 15:27:56.705788 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="extract"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.705797 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="extract"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.706013 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d0cce9d-1fc7-4f60-9651-e25306d13944" containerName="extract"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.706767 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.716143 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-mcps7"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.739600 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"]
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.818912 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvmjd\" (UniqueName: \"kubernetes.io/projected/7b3f5ffb-3826-458b-9777-efd0b8cc747e-kube-api-access-zvmjd\") pod \"openstack-operator-controller-operator-77b4579dbb-785g6\" (UID: \"7b3f5ffb-3826-458b-9777-efd0b8cc747e\") " pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.920215 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvmjd\" (UniqueName: \"kubernetes.io/projected/7b3f5ffb-3826-458b-9777-efd0b8cc747e-kube-api-access-zvmjd\") pod \"openstack-operator-controller-operator-77b4579dbb-785g6\" (UID: \"7b3f5ffb-3826-458b-9777-efd0b8cc747e\") " pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:27:56 crc kubenswrapper[4716]: I1209 15:27:56.942789 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvmjd\" (UniqueName: \"kubernetes.io/projected/7b3f5ffb-3826-458b-9777-efd0b8cc747e-kube-api-access-zvmjd\") pod \"openstack-operator-controller-operator-77b4579dbb-785g6\" (UID: \"7b3f5ffb-3826-458b-9777-efd0b8cc747e\") " pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:27:57 crc kubenswrapper[4716]: I1209 15:27:57.027149 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:27:57 crc kubenswrapper[4716]: I1209 15:27:57.477695 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"]
Dec 09 15:27:57 crc kubenswrapper[4716]: I1209 15:27:57.925109 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6" event={"ID":"7b3f5ffb-3826-458b-9777-efd0b8cc747e","Type":"ContainerStarted","Data":"1918267e2623ed8f3dcc9f25af82d8767ce7bd56f7e37e2a76548b3fa2d6e412"}
Dec 09 15:28:02 crc kubenswrapper[4716]: I1209 15:28:02.973271 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6" event={"ID":"7b3f5ffb-3826-458b-9777-efd0b8cc747e","Type":"ContainerStarted","Data":"ba158d1d206fc53ab17bc6639df14c81a802c9606bb090ba63264ec5fcca9cb2"}
Dec 09 15:28:02 crc kubenswrapper[4716]: I1209 15:28:02.973998 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:28:03 crc kubenswrapper[4716]: I1209 15:28:03.013642 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6" podStartSLOduration=2.62351896 podStartE2EDuration="7.013605682s" podCreationTimestamp="2025-12-09 15:27:56 +0000 UTC" firstStartedPulling="2025-12-09 15:27:57.480381193 +0000 UTC m=+1164.635125181" lastFinishedPulling="2025-12-09 15:28:01.870467915 +0000 UTC m=+1169.025211903" observedRunningTime="2025-12-09 15:28:03.000557361 +0000 UTC m=+1170.155301369" watchObservedRunningTime="2025-12-09 15:28:03.013605682 +0000 UTC m=+1170.168349670"
Dec 09 15:28:07 crc kubenswrapper[4716]: I1209 15:28:07.031940 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.040691 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.042928 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.047655 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-tbjkp"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.053004 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.054421 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.056784 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-bdm8m"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.065372 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.067070 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.068521 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-zr6nd"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.078931 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.096517 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.096702 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.122394 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.124040 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.127693 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.128988 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.131176 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-6rntq"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.140040 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.142189 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.143684 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njtb4\" (UniqueName: \"kubernetes.io/projected/266cbf21-b738-4045-902b-88deadcc5869-kube-api-access-njtb4\") pod \"cinder-operator-controller-manager-6c677c69b-xzp8n\" (UID: \"266cbf21-b738-4045-902b-88deadcc5869\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.143731 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc6wn\" (UniqueName: \"kubernetes.io/projected/33b32911-25d0-45d5-8009-4d9787875e86-kube-api-access-zc6wn\") pod \"barbican-operator-controller-manager-7d9dfd778-z2l9s\" (UID: \"33b32911-25d0-45d5-8009-4d9787875e86\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.143874 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7q8w\" (UniqueName: \"kubernetes.io/projected/ee92f03c-d515-4aa3-ad3c-cce0c89fd12b-kube-api-access-w7q8w\") pod \"designate-operator-controller-manager-697fb699cf-p6hrb\" (UID: \"ee92f03c-d515-4aa3-ad3c-cce0c89fd12b\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.148687 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-cpvcb"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.149168 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-4ggqx"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.155205 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.162910 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.172447 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.186176 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.189113 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.194673 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.204607 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-m6p7d"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248392 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248466 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7vkk\" (UniqueName: \"kubernetes.io/projected/b60a4ca3-f2c6-4829-bc4f-d47b0740e378-kube-api-access-x7vkk\") pod \"glance-operator-controller-manager-5697bb5779-s7mv7\" (UID: \"b60a4ca3-f2c6-4829-bc4f-d47b0740e378\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248507 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qb96\" (UniqueName: \"kubernetes.io/projected/b5a74069-1167-4e2a-a3b2-c11507b121ab-kube-api-access-8qb96\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248577 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njtb4\" (UniqueName: \"kubernetes.io/projected/266cbf21-b738-4045-902b-88deadcc5869-kube-api-access-njtb4\") pod \"cinder-operator-controller-manager-6c677c69b-xzp8n\" (UID: \"266cbf21-b738-4045-902b-88deadcc5869\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248615 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc6wn\" (UniqueName: \"kubernetes.io/projected/33b32911-25d0-45d5-8009-4d9787875e86-kube-api-access-zc6wn\") pod \"barbican-operator-controller-manager-7d9dfd778-z2l9s\" (UID: \"33b32911-25d0-45d5-8009-4d9787875e86\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248742 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qctqv\" (UniqueName: \"kubernetes.io/projected/c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489-kube-api-access-qctqv\") pod \"heat-operator-controller-manager-5f64f6f8bb-2ft2d\" (UID: \"c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248774 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk2qf\" (UniqueName: \"kubernetes.io/projected/87589b07-5c3c-46c9-b84e-ffc2efa3b817-kube-api-access-fk2qf\") pod \"horizon-operator-controller-manager-68c6d99b8f-9d9ms\" (UID: \"87589b07-5c3c-46c9-b84e-ffc2efa3b817\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.248915 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7q8w\" (UniqueName: \"kubernetes.io/projected/ee92f03c-d515-4aa3-ad3c-cce0c89fd12b-kube-api-access-w7q8w\") pod \"designate-operator-controller-manager-697fb699cf-p6hrb\" (UID: \"ee92f03c-d515-4aa3-ad3c-cce0c89fd12b\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.253419 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.253459 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.254792 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.255950 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.256036 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.256402 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.258766 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-ggp7h"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.259492 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.273173 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-cjnmv"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.283448 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.285133 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.300134 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.300814 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.304132 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njtb4\" (UniqueName: \"kubernetes.io/projected/266cbf21-b738-4045-902b-88deadcc5869-kube-api-access-njtb4\") pod \"cinder-operator-controller-manager-6c677c69b-xzp8n\" (UID: \"266cbf21-b738-4045-902b-88deadcc5869\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.305926 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7q8w\" (UniqueName: \"kubernetes.io/projected/ee92f03c-d515-4aa3-ad3c-cce0c89fd12b-kube-api-access-w7q8w\") pod \"designate-operator-controller-manager-697fb699cf-p6hrb\" (UID: \"ee92f03c-d515-4aa3-ad3c-cce0c89fd12b\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.323054 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.328110 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-6ltqw"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.328417 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-pqfg7"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.329422 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc6wn\" (UniqueName: \"kubernetes.io/projected/33b32911-25d0-45d5-8009-4d9787875e86-kube-api-access-zc6wn\") pod \"barbican-operator-controller-manager-7d9dfd778-z2l9s\" (UID: \"33b32911-25d0-45d5-8009-4d9787875e86\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.332916 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.345838 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351373 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qctqv\" (UniqueName: \"kubernetes.io/projected/c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489-kube-api-access-qctqv\") pod \"heat-operator-controller-manager-5f64f6f8bb-2ft2d\" (UID: \"c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351427 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8kdh\" (UniqueName: \"kubernetes.io/projected/695a0bab-c2c8-4c7d-9420-2dc191000e54-kube-api-access-w8kdh\") pod \"ironic-operator-controller-manager-967d97867-hq8hn\" (UID: \"695a0bab-c2c8-4c7d-9420-2dc191000e54\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351462 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk2qf\" (UniqueName: \"kubernetes.io/projected/87589b07-5c3c-46c9-b84e-ffc2efa3b817-kube-api-access-fk2qf\") pod \"horizon-operator-controller-manager-68c6d99b8f-9d9ms\" (UID: \"87589b07-5c3c-46c9-b84e-ffc2efa3b817\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351525 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fklkt\" (UniqueName: \"kubernetes.io/projected/30c82b79-c03d-45d3-8b0a-ca506daf2934-kube-api-access-fklkt\") pod \"manila-operator-controller-manager-5b5fd79c9c-dzpdl\" (UID: \"30c82b79-c03d-45d3-8b0a-ca506daf2934\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351549 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9v2k\" (UniqueName: \"kubernetes.io/projected/436bb095-8486-4d67-9fc5-6596738065cc-kube-api-access-q9v2k\") pod \"keystone-operator-controller-manager-7765d96ddf-szpds\" (UID: \"436bb095-8486-4d67-9fc5-6596738065cc\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351596 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7vkk\" (UniqueName: \"kubernetes.io/projected/b60a4ca3-f2c6-4829-bc4f-d47b0740e378-kube-api-access-x7vkk\") pod \"glance-operator-controller-manager-5697bb5779-s7mv7\" (UID: \"b60a4ca3-f2c6-4829-bc4f-d47b0740e378\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.351633 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qb96\" (UniqueName: \"kubernetes.io/projected/b5a74069-1167-4e2a-a3b2-c11507b121ab-kube-api-access-8qb96\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"
Dec 09 15:28:27 crc kubenswrapper[4716]: E1209 15:28:27.352222 4716 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Dec 09 15:28:27 crc kubenswrapper[4716]: E1209 15:28:27.352265 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert podName:b5a74069-1167-4e2a-a3b2-c11507b121ab nodeName:}" failed. No retries permitted until 2025-12-09 15:28:27.852247877 +0000 UTC m=+1195.006991865 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert") pod "infra-operator-controller-manager-78d48bff9d-sktgh" (UID: "b5a74069-1167-4e2a-a3b2-c11507b121ab") : secret "infra-operator-webhook-server-cert" not found
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.359698 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.368726 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-trpzz"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.378881 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.384775 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-sm4k4"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.387974 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.394279 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk2qf\" (UniqueName: \"kubernetes.io/projected/87589b07-5c3c-46c9-b84e-ffc2efa3b817-kube-api-access-fk2qf\") pod \"horizon-operator-controller-manager-68c6d99b8f-9d9ms\" (UID: \"87589b07-5c3c-46c9-b84e-ffc2efa3b817\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.438327 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.438542 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.441959 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7vkk\" (UniqueName: \"kubernetes.io/projected/b60a4ca3-f2c6-4829-bc4f-d47b0740e378-kube-api-access-x7vkk\") pod \"glance-operator-controller-manager-5697bb5779-s7mv7\" (UID: \"b60a4ca3-f2c6-4829-bc4f-d47b0740e378\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.472536 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qctqv\" (UniqueName: \"kubernetes.io/projected/c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489-kube-api-access-qctqv\") pod \"heat-operator-controller-manager-5f64f6f8bb-2ft2d\" (UID: \"c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.474722 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8kdh\" (UniqueName: \"kubernetes.io/projected/695a0bab-c2c8-4c7d-9420-2dc191000e54-kube-api-access-w8kdh\") pod \"ironic-operator-controller-manager-967d97867-hq8hn\" (UID: \"695a0bab-c2c8-4c7d-9420-2dc191000e54\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.481619 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgglz\" (UniqueName: \"kubernetes.io/projected/86831782-f8f0-40ad-99f7-4568185065b0-kube-api-access-jgglz\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-qgr4j\" (UID: \"86831782-f8f0-40ad-99f7-4568185065b0\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.481935 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fklkt\" (UniqueName: \"kubernetes.io/projected/30c82b79-c03d-45d3-8b0a-ca506daf2934-kube-api-access-fklkt\") pod \"manila-operator-controller-manager-5b5fd79c9c-dzpdl\" (UID: \"30c82b79-c03d-45d3-8b0a-ca506daf2934\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.482069 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9v2k\" (UniqueName: \"kubernetes.io/projected/436bb095-8486-4d67-9fc5-6596738065cc-kube-api-access-q9v2k\") pod \"keystone-operator-controller-manager-7765d96ddf-szpds\" (UID: \"436bb095-8486-4d67-9fc5-6596738065cc\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.485896 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.486458 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qb96\" (UniqueName: \"kubernetes.io/projected/b5a74069-1167-4e2a-a3b2-c11507b121ab-kube-api-access-8qb96\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.493550 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.526064 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.527866 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.533369 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9v2k\" (UniqueName: \"kubernetes.io/projected/436bb095-8486-4d67-9fc5-6596738065cc-kube-api-access-q9v2k\") pod \"keystone-operator-controller-manager-7765d96ddf-szpds\" (UID: \"436bb095-8486-4d67-9fc5-6596738065cc\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.541449 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fklkt\" (UniqueName: \"kubernetes.io/projected/30c82b79-c03d-45d3-8b0a-ca506daf2934-kube-api-access-fklkt\") pod \"manila-operator-controller-manager-5b5fd79c9c-dzpdl\" (UID: \"30c82b79-c03d-45d3-8b0a-ca506daf2934\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.543500 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8kdh\" (UniqueName: \"kubernetes.io/projected/695a0bab-c2c8-4c7d-9420-2dc191000e54-kube-api-access-w8kdh\") pod \"ironic-operator-controller-manager-967d97867-hq8hn\" (UID: \"695a0bab-c2c8-4c7d-9420-2dc191000e54\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.563056 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.576352 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.578040 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.587603 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgglz\" (UniqueName: \"kubernetes.io/projected/86831782-f8f0-40ad-99f7-4568185065b0-kube-api-access-jgglz\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-qgr4j\" (UID: \"86831782-f8f0-40ad-99f7-4568185065b0\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.587731 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf4j7\" (UniqueName: \"kubernetes.io/projected/8da5796c-fddb-414b-8d26-6657d25e6c00-kube-api-access-pf4j7\") pod \"mariadb-operator-controller-manager-79c8c4686c-5pz92\" (UID: \"8da5796c-fddb-414b-8d26-6657d25e6c00\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.587820 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6sk8\" (UniqueName: \"kubernetes.io/projected/f1029307-bcf9-40c0-b656-b7d203493022-kube-api-access-s6sk8\") pod \"nova-operator-controller-manager-697bc559fc-pmct6\" (UID: \"f1029307-bcf9-40c0-b656-b7d203493022\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.600219 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.600461 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-kgr69"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.602893 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.605356 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-kpfwm"
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.606533 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-kgr69"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.629902 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht"]
Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.636803 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.641851 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-tnkd2" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.642298 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.653090 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.654842 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.659384 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-kwkdg" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.678671 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.688763 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgglz\" (UniqueName: \"kubernetes.io/projected/86831782-f8f0-40ad-99f7-4568185065b0-kube-api-access-jgglz\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-qgr4j\" (UID: \"86831782-f8f0-40ad-99f7-4568185065b0\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.691547 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf4j7\" (UniqueName: \"kubernetes.io/projected/8da5796c-fddb-414b-8d26-6657d25e6c00-kube-api-access-pf4j7\") pod \"mariadb-operator-controller-manager-79c8c4686c-5pz92\" (UID: \"8da5796c-fddb-414b-8d26-6657d25e6c00\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.691793 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6sk8\" (UniqueName: \"kubernetes.io/projected/f1029307-bcf9-40c0-b656-b7d203493022-kube-api-access-s6sk8\") pod \"nova-operator-controller-manager-697bc559fc-pmct6\" (UID: \"f1029307-bcf9-40c0-b656-b7d203493022\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.692000 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78lfq\" (UniqueName: \"kubernetes.io/projected/6890a044-24cb-49b2-9d60-105fa8e57573-kube-api-access-78lfq\") pod \"octavia-operator-controller-manager-998648c74-kgr69\" (UID: \"6890a044-24cb-49b2-9d60-105fa8e57573\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.700222 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.707753 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.707857 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.709490 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-4m8r9" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.724895 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.743371 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.748392 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.751277 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.754251 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-nn7st" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.754821 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6sk8\" (UniqueName: \"kubernetes.io/projected/f1029307-bcf9-40c0-b656-b7d203493022-kube-api-access-s6sk8\") pod \"nova-operator-controller-manager-697bc559fc-pmct6\" (UID: \"f1029307-bcf9-40c0-b656-b7d203493022\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.759809 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.760422 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf4j7\" (UniqueName: \"kubernetes.io/projected/8da5796c-fddb-414b-8d26-6657d25e6c00-kube-api-access-pf4j7\") pod \"mariadb-operator-controller-manager-79c8c4686c-5pz92\" (UID: \"8da5796c-fddb-414b-8d26-6657d25e6c00\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.761960 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.764526 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-lwb4s" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.776476 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-kspms"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.784078 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.786558 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-9hx85" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795091 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78lfq\" (UniqueName: \"kubernetes.io/projected/6890a044-24cb-49b2-9d60-105fa8e57573-kube-api-access-78lfq\") pod \"octavia-operator-controller-manager-998648c74-kgr69\" (UID: \"6890a044-24cb-49b2-9d60-105fa8e57573\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795216 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slpjz\" (UniqueName: \"kubernetes.io/projected/05653cf9-bc33-44bb-acdd-21dd610a7665-kube-api-access-slpjz\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795288 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snwqd\" (UniqueName: \"kubernetes.io/projected/d5727f03-aede-454c-8dec-17e10986da51-kube-api-access-snwqd\") pod \"ovn-operator-controller-manager-b6456fdb6-649h2\" (UID: \"d5727f03-aede-454c-8dec-17e10986da51\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795345 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj2sv\" (UniqueName: \"kubernetes.io/projected/43243e09-47d6-428c-adc6-3542056106b5-kube-api-access-wj2sv\") pod \"placement-operator-controller-manager-78f8948974-fwnp7\" (UID: \"43243e09-47d6-428c-adc6-3542056106b5\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795403 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795538 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.795573 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-kspms"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.806762 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.829221 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78lfq\" (UniqueName: \"kubernetes.io/projected/6890a044-24cb-49b2-9d60-105fa8e57573-kube-api-access-78lfq\") pod 
\"octavia-operator-controller-manager-998648c74-kgr69\" (UID: \"6890a044-24cb-49b2-9d60-105fa8e57573\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.878424 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.880385 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.883326 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-q92p7" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896677 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896734 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slpjz\" (UniqueName: \"kubernetes.io/projected/05653cf9-bc33-44bb-acdd-21dd610a7665-kube-api-access-slpjz\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896771 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgrxx\" (UniqueName: \"kubernetes.io/projected/4d332287-0f6e-46d1-9cd9-d31dd855d753-kube-api-access-mgrxx\") pod \"swift-operator-controller-manager-9d58d64bc-h4nlt\" (UID: \"4d332287-0f6e-46d1-9cd9-d31dd855d753\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896838 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snwqd\" (UniqueName: \"kubernetes.io/projected/d5727f03-aede-454c-8dec-17e10986da51-kube-api-access-snwqd\") pod \"ovn-operator-controller-manager-b6456fdb6-649h2\" (UID: \"d5727f03-aede-454c-8dec-17e10986da51\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896897 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84vh4\" (UniqueName: \"kubernetes.io/projected/ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2-kube-api-access-84vh4\") pod \"test-operator-controller-manager-5854674fcc-kspms\" (UID: \"ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896932 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj2sv\" (UniqueName: \"kubernetes.io/projected/43243e09-47d6-428c-adc6-3542056106b5-kube-api-access-wj2sv\") pod \"placement-operator-controller-manager-78f8948974-fwnp7\" (UID: \"43243e09-47d6-428c-adc6-3542056106b5\") " 
pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.896981 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.897067 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc9fz\" (UniqueName: \"kubernetes.io/projected/57b68343-4540-4097-9f68-a538c63bae3b-kube-api-access-nc9fz\") pod \"telemetry-operator-controller-manager-5656d9bf69-llf2c\" (UID: \"57b68343-4540-4097-9f68-a538c63bae3b\") " pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:28:27 crc kubenswrapper[4716]: E1209 15:28:27.897237 4716 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:27 crc kubenswrapper[4716]: E1209 15:28:27.897290 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert podName:b5a74069-1167-4e2a-a3b2-c11507b121ab nodeName:}" failed. No retries permitted until 2025-12-09 15:28:28.897271197 +0000 UTC m=+1196.052015195 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert") pod "infra-operator-controller-manager-78d48bff9d-sktgh" (UID: "b5a74069-1167-4e2a-a3b2-c11507b121ab") : secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:27 crc kubenswrapper[4716]: E1209 15:28:27.898053 4716 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:27 crc kubenswrapper[4716]: E1209 15:28:27.898089 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert podName:05653cf9-bc33-44bb-acdd-21dd610a7665 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:28.39807894 +0000 UTC m=+1195.552822928 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdbbht" (UID: "05653cf9-bc33-44bb-acdd-21dd610a7665") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.902313 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv"] Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.950967 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.966952 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.970076 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slpjz\" (UniqueName: \"kubernetes.io/projected/05653cf9-bc33-44bb-acdd-21dd610a7665-kube-api-access-slpjz\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.983980 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj2sv\" (UniqueName: \"kubernetes.io/projected/43243e09-47d6-428c-adc6-3542056106b5-kube-api-access-wj2sv\") pod \"placement-operator-controller-manager-78f8948974-fwnp7\" (UID: \"43243e09-47d6-428c-adc6-3542056106b5\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.985914 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" Dec 09 15:28:27 crc kubenswrapper[4716]: I1209 15:28:27.987097 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snwqd\" (UniqueName: \"kubernetes.io/projected/d5727f03-aede-454c-8dec-17e10986da51-kube-api-access-snwqd\") pod \"ovn-operator-controller-manager-b6456fdb6-649h2\" (UID: \"d5727f03-aede-454c-8dec-17e10986da51\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.020888 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc9fz\" (UniqueName: \"kubernetes.io/projected/57b68343-4540-4097-9f68-a538c63bae3b-kube-api-access-nc9fz\") pod \"telemetry-operator-controller-manager-5656d9bf69-llf2c\" (UID: \"57b68343-4540-4097-9f68-a538c63bae3b\") " pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.021306 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgrxx\" (UniqueName: \"kubernetes.io/projected/4d332287-0f6e-46d1-9cd9-d31dd855d753-kube-api-access-mgrxx\") pod \"swift-operator-controller-manager-9d58d64bc-h4nlt\" (UID: \"4d332287-0f6e-46d1-9cd9-d31dd855d753\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.022894 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84vh4\" (UniqueName: \"kubernetes.io/projected/ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2-kube-api-access-84vh4\") pod \"test-operator-controller-manager-5854674fcc-kspms\" (UID: \"ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.041295 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67lgp\" (UniqueName: \"kubernetes.io/projected/368b843d-fa53-406c-a5a8-a78a8763718d-kube-api-access-67lgp\") pod \"watcher-operator-controller-manager-667bd8d554-s7dkv\" (UID: \"368b843d-fa53-406c-a5a8-a78a8763718d\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 
09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.066510 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgrxx\" (UniqueName: \"kubernetes.io/projected/4d332287-0f6e-46d1-9cd9-d31dd855d753-kube-api-access-mgrxx\") pod \"swift-operator-controller-manager-9d58d64bc-h4nlt\" (UID: \"4d332287-0f6e-46d1-9cd9-d31dd855d753\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.075662 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc9fz\" (UniqueName: \"kubernetes.io/projected/57b68343-4540-4097-9f68-a538c63bae3b-kube-api-access-nc9fz\") pod \"telemetry-operator-controller-manager-5656d9bf69-llf2c\" (UID: \"57b68343-4540-4097-9f68-a538c63bae3b\") " pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.089104 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84vh4\" (UniqueName: \"kubernetes.io/projected/ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2-kube-api-access-84vh4\") pod \"test-operator-controller-manager-5854674fcc-kspms\" (UID: \"ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.090048 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx"] Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.091552 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.109057 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.109133 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.109366 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ph5qp" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.109472 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.153933 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.158510 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67lgp\" (UniqueName: \"kubernetes.io/projected/368b843d-fa53-406c-a5a8-a78a8763718d-kube-api-access-67lgp\") pod \"watcher-operator-controller-manager-667bd8d554-s7dkv\" (UID: \"368b843d-fa53-406c-a5a8-a78a8763718d\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.168558 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx"] Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.176937 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.199703 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx"] Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.200919 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.201418 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.226195 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx"] Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.228048 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-rgbsc" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.243390 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.256417 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67lgp\" (UniqueName: \"kubernetes.io/projected/368b843d-fa53-406c-a5a8-a78a8763718d-kube-api-access-67lgp\") pod \"watcher-operator-controller-manager-667bd8d554-s7dkv\" (UID: \"368b843d-fa53-406c-a5a8-a78a8763718d\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.262397 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.262511 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-748br\" (UniqueName: \"kubernetes.io/projected/3221ee7d-c104-4055-961a-46cd6ba8c602-kube-api-access-748br\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.262707 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.299343 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.365355 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.365762 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rh5qh\" (UniqueName: \"kubernetes.io/projected/08b1c536-2d1d-42fc-9e18-87ebe38bed16-kube-api-access-rh5qh\") pod \"rabbitmq-cluster-operator-manager-668c99d594-tf7dx\" (UID: \"08b1c536-2d1d-42fc-9e18-87ebe38bed16\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.365879 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.365945 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-748br\" (UniqueName: \"kubernetes.io/projected/3221ee7d-c104-4055-961a-46cd6ba8c602-kube-api-access-748br\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.366395 4716 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.366438 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:28.866424014 +0000 UTC m=+1196.021168002 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.366585 4716 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.366607 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:28.866600639 +0000 UTC m=+1196.021344627 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "metrics-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.391154 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.406544 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-748br\" (UniqueName: \"kubernetes.io/projected/3221ee7d-c104-4055-961a-46cd6ba8c602-kube-api-access-748br\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.470429 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.470514 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rh5qh\" (UniqueName: \"kubernetes.io/projected/08b1c536-2d1d-42fc-9e18-87ebe38bed16-kube-api-access-rh5qh\") pod \"rabbitmq-cluster-operator-manager-668c99d594-tf7dx\" (UID: \"08b1c536-2d1d-42fc-9e18-87ebe38bed16\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.471097 4716 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.471145 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert podName:05653cf9-bc33-44bb-acdd-21dd610a7665 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:29.471131836 +0000 UTC m=+1196.625875824 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdbbht" (UID: "05653cf9-bc33-44bb-acdd-21dd610a7665") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.534881 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rh5qh\" (UniqueName: \"kubernetes.io/projected/08b1c536-2d1d-42fc-9e18-87ebe38bed16-kube-api-access-rh5qh\") pod \"rabbitmq-cluster-operator-manager-668c99d594-tf7dx\" (UID: \"08b1c536-2d1d-42fc-9e18-87ebe38bed16\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.584040 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.848269 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n"] Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.883854 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.883966 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.884148 4716 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.884199 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:29.88418378 +0000 UTC m=+1197.038927768 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "metrics-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.884983 4716 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.887020 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:29.88699675 +0000 UTC m=+1197.041740738 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: I1209 15:28:28.987737 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.987975 4716 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:28 crc kubenswrapper[4716]: E1209 15:28:28.988054 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert podName:b5a74069-1167-4e2a-a3b2-c11507b121ab nodeName:}" failed. No retries permitted until 2025-12-09 15:28:30.988027417 +0000 UTC m=+1198.142771405 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert") pod "infra-operator-controller-manager-78d48bff9d-sktgh" (UID: "b5a74069-1167-4e2a-a3b2-c11507b121ab") : secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.245695 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" event={"ID":"266cbf21-b738-4045-902b-88deadcc5869","Type":"ContainerStarted","Data":"d7f1b3124f00ffef91ad6ec42c00d4b6d7bd1ea2f5d537fdc47b35d35bfbe550"} Dec 09 15:28:29 crc kubenswrapper[4716]: W1209 15:28:29.336763 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2869bc2_fe1e_49a0_9bd5_ec2b1a23b489.slice/crio-2879e8b363c3a979689a690883b925a11e1e2ee5cc6d17668bc0abd33248acd7 WatchSource:0}: Error finding container 2879e8b363c3a979689a690883b925a11e1e2ee5cc6d17668bc0abd33248acd7: Status 404 returned error can't find the container with id 2879e8b363c3a979689a690883b925a11e1e2ee5cc6d17668bc0abd33248acd7 Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.339504 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d"] Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.347919 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms"] Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.395807 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s"] Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.498880 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:29 crc kubenswrapper[4716]: E1209 
15:28:29.498999 4716 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:29 crc kubenswrapper[4716]: E1209 15:28:29.499065 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert podName:05653cf9-bc33-44bb-acdd-21dd610a7665 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:31.499048203 +0000 UTC m=+1198.653792191 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdbbht" (UID: "05653cf9-bc33-44bb-acdd-21dd610a7665") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.821842 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92"] Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.838860 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7"] Dec 09 15:28:29 crc kubenswrapper[4716]: W1209 15:28:29.868247 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43243e09_47d6_428c_adc6_3542056106b5.slice/crio-8d0e807d069807a6b680f0f4134bac4c6ae9a77345cce5bf611cba1034e74ef7 WatchSource:0}: Error finding container 8d0e807d069807a6b680f0f4134bac4c6ae9a77345cce5bf611cba1034e74ef7: Status 404 returned error can't find the container with id 8d0e807d069807a6b680f0f4134bac4c6ae9a77345cce5bf611cba1034e74ef7 Dec 09 15:28:29 crc kubenswrapper[4716]: W1209 15:28:29.870389 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86831782_f8f0_40ad_99f7_4568185065b0.slice/crio-090e6fce7eea70eb20e06755d2e12bbd3a7ed5bff7e557a29b92ced96108f918 WatchSource:0}: Error finding container 090e6fce7eea70eb20e06755d2e12bbd3a7ed5bff7e557a29b92ced96108f918: Status 404 returned error can't find the container with id 090e6fce7eea70eb20e06755d2e12bbd3a7ed5bff7e557a29b92ced96108f918 Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.880509 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j"] Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.886981 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7"] Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.916107 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:29 crc kubenswrapper[4716]: I1209 15:28:29.916229 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " 
pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:29 crc kubenswrapper[4716]: E1209 15:28:29.916392 4716 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 15:28:29 crc kubenswrapper[4716]: E1209 15:28:29.916445 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:31.91643071 +0000 UTC m=+1199.071174698 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "metrics-server-cert" not found Dec 09 15:28:29 crc kubenswrapper[4716]: E1209 15:28:29.916786 4716 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 15:28:29 crc kubenswrapper[4716]: E1209 15:28:29.916816 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:31.916807661 +0000 UTC m=+1199.071551639 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "webhook-server-cert" not found Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.078334 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.249527 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" event={"ID":"ee92f03c-d515-4aa3-ad3c-cce0c89fd12b","Type":"ContainerStarted","Data":"d80d60f51c0a6fcb8a9dc5bccaddfd07f7ea61fcd9b3fa0d44a3bb8d79f835b8"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.253312 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" event={"ID":"c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489","Type":"ContainerStarted","Data":"2879e8b363c3a979689a690883b925a11e1e2ee5cc6d17668bc0abd33248acd7"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.254612 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" event={"ID":"33b32911-25d0-45d5-8009-4d9787875e86","Type":"ContainerStarted","Data":"4527c36934a38b515759b52e09d155fb2f30b1b737fbf63c745a37293dd8dcc1"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.257152 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" event={"ID":"43243e09-47d6-428c-adc6-3542056106b5","Type":"ContainerStarted","Data":"8d0e807d069807a6b680f0f4134bac4c6ae9a77345cce5bf611cba1034e74ef7"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.259203 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms" event={"ID":"87589b07-5c3c-46c9-b84e-ffc2efa3b817","Type":"ContainerStarted","Data":"c9a2d87b2a9e7cb7d3fe7f60f2de0ffa958d4abc8177e73ab0591ae632ea2692"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.260617 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" event={"ID":"b60a4ca3-f2c6-4829-bc4f-d47b0740e378","Type":"ContainerStarted","Data":"88ee7c949d8bea83e38f44983e6807edf66926663d5b99c1d7ffb7492e3356a5"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.262449 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" event={"ID":"8da5796c-fddb-414b-8d26-6657d25e6c00","Type":"ContainerStarted","Data":"20a19adac32d57ea536645805382e7f32aeb777b43717251809bf875bfdbceba"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.269834 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" event={"ID":"86831782-f8f0-40ad-99f7-4568185065b0","Type":"ContainerStarted","Data":"090e6fce7eea70eb20e06755d2e12bbd3a7ed5bff7e557a29b92ced96108f918"} Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.732537 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.772090 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.788109 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.793589 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.807931 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2"] Dec 09 15:28:30 crc kubenswrapper[4716]: W1209 15:28:30.810392 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d332287_0f6e_46d1_9cd9_d31dd855d753.slice/crio-6b87ad8bd5e47e7f896ba7554de076f398b4bd14938c08394f68b15d13de5b01 WatchSource:0}: Error finding container 6b87ad8bd5e47e7f896ba7554de076f398b4bd14938c08394f68b15d13de5b01: Status 404 returned error can't find the container with id 6b87ad8bd5e47e7f896ba7554de076f398b4bd14938c08394f68b15d13de5b01 Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.819457 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.829270 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx"] Dec 09 15:28:30 crc kubenswrapper[4716]: W1209 15:28:30.830768 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57b68343_4540_4097_9f68_a538c63bae3b.slice/crio-ae5c8bcd4c6d097fa30cbc4a54cf6da9bfab8ec3eda3cb7b10f989608d17f14f WatchSource:0}: Error finding container 
ae5c8bcd4c6d097fa30cbc4a54cf6da9bfab8ec3eda3cb7b10f989608d17f14f: Status 404 returned error can't find the container with id ae5c8bcd4c6d097fa30cbc4a54cf6da9bfab8ec3eda3cb7b10f989608d17f14f Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.835710 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-kspms"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.870088 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.883260 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv"] Dec 09 15:28:30 crc kubenswrapper[4716]: I1209 15:28:30.898129 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-kgr69"] Dec 09 15:28:30 crc kubenswrapper[4716]: W1209 15:28:30.916791 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod436bb095_8486_4d67_9fc5_6596738065cc.slice/crio-bab0b1908200b866d11f8af9571a4c06d718d985ec95ff80489ce7f4e0b57ff1 WatchSource:0}: Error finding container bab0b1908200b866d11f8af9571a4c06d718d985ec95ff80489ce7f4e0b57ff1: Status 404 returned error can't find the container with id bab0b1908200b866d11f8af9571a4c06d718d985ec95ff80489ce7f4e0b57ff1 Dec 09 15:28:30 crc kubenswrapper[4716]: W1209 15:28:30.929876 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod368b843d_fa53_406c_a5a8_a78a8763718d.slice/crio-cf7099cc15f88e69bb517bbdaabfe19134d69bd3bdfd25814acf7b2403ddd311 WatchSource:0}: Error finding container cf7099cc15f88e69bb517bbdaabfe19134d69bd3bdfd25814acf7b2403ddd311: Status 404 returned error can't find the container with id cf7099cc15f88e69bb517bbdaabfe19134d69bd3bdfd25814acf7b2403ddd311 Dec 09 15:28:30 crc kubenswrapper[4716]: E1209 15:28:30.944287 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-84vh4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-kspms_openstack-operators(ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 15:28:30 crc kubenswrapper[4716]: E1209 15:28:30.947098 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-84vh4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-kspms_openstack-operators(ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 15:28:30 crc kubenswrapper[4716]: E1209 15:28:30.949165 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" podUID="ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2" Dec 09 15:28:30 crc kubenswrapper[4716]: E1209 15:28:30.950486 4716 kuberuntime_manager.go:1274] "Unhandled Error" 
err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fklkt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-dzpdl_openstack-operators(30c82b79-c03d-45d3-8b0a-ca506daf2934): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.045959 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.046255 4716 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.046338 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert podName:b5a74069-1167-4e2a-a3b2-c11507b121ab nodeName:}" failed. No retries permitted until 2025-12-09 15:28:35.046318762 +0000 UTC m=+1202.201062750 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert") pod "infra-operator-controller-manager-78d48bff9d-sktgh" (UID: "b5a74069-1167-4e2a-a3b2-c11507b121ab") : secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.298730 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" event={"ID":"4d332287-0f6e-46d1-9cd9-d31dd855d753","Type":"ContainerStarted","Data":"6b87ad8bd5e47e7f896ba7554de076f398b4bd14938c08394f68b15d13de5b01"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.300555 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" event={"ID":"f1029307-bcf9-40c0-b656-b7d203493022","Type":"ContainerStarted","Data":"938f1f94fab9a74cf060b5b416125a5509796dff0f8010794004ef483af141f2"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.304211 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" event={"ID":"57b68343-4540-4097-9f68-a538c63bae3b","Type":"ContainerStarted","Data":"ae5c8bcd4c6d097fa30cbc4a54cf6da9bfab8ec3eda3cb7b10f989608d17f14f"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.309008 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" event={"ID":"ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2","Type":"ContainerStarted","Data":"05966a66e1c82c3fea65a74a0027688ef137f308e4d8ecac4618537a1cbb5c51"} Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.312889 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" podUID="ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2" Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.316244 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" event={"ID":"436bb095-8486-4d67-9fc5-6596738065cc","Type":"ContainerStarted","Data":"bab0b1908200b866d11f8af9571a4c06d718d985ec95ff80489ce7f4e0b57ff1"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.320162 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" event={"ID":"368b843d-fa53-406c-a5a8-a78a8763718d","Type":"ContainerStarted","Data":"cf7099cc15f88e69bb517bbdaabfe19134d69bd3bdfd25814acf7b2403ddd311"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.321557 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" event={"ID":"6890a044-24cb-49b2-9d60-105fa8e57573","Type":"ContainerStarted","Data":"ee5311fdff50f75a6dc03fb84ba5d7e6a9d4826f2b06b2b76533ad04ae0b2759"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.323093 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" event={"ID":"30c82b79-c03d-45d3-8b0a-ca506daf2934","Type":"ContainerStarted","Data":"d7bd5a3c7f734b33ff9c86c5154bf17108a0424c5a5922dc5da3cd9549c56899"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.324805 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" event={"ID":"08b1c536-2d1d-42fc-9e18-87ebe38bed16","Type":"ContainerStarted","Data":"84d9ef1f349b571677fe68b14989b57d82c6ac0f8e2ac5f82f1db18cb1ba7d4e"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.326905 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" event={"ID":"695a0bab-c2c8-4c7d-9420-2dc191000e54","Type":"ContainerStarted","Data":"baf7bb13721513da0a67d73ae9159c5f025d700a838d3fe58f60ffc166b0ba10"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.334922 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" event={"ID":"d5727f03-aede-454c-8dec-17e10986da51","Type":"ContainerStarted","Data":"94ed3f2450c7df8dae7d14b5a455f04aea494ce65c9a161c05531d5960b3c436"} Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.557934 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.558141 4716 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.558243 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert podName:05653cf9-bc33-44bb-acdd-21dd610a7665 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:35.558218512 +0000 UTC m=+1202.712962550 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdbbht" (UID: "05653cf9-bc33-44bb-acdd-21dd610a7665") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.976544 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:31 crc kubenswrapper[4716]: I1209 15:28:31.976684 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.976870 4716 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.976921 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:35.976905687 +0000 UTC m=+1203.131649675 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "metrics-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.976964 4716 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 15:28:31 crc kubenswrapper[4716]: E1209 15:28:31.976981 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:35.976975219 +0000 UTC m=+1203.131719207 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "webhook-server-cert" not found Dec 09 15:28:32 crc kubenswrapper[4716]: E1209 15:28:32.365124 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" podUID="ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2" Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.069671 4716 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.070506 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert podName:b5a74069-1167-4e2a-a3b2-c11507b121ab nodeName:}" failed. No retries permitted until 2025-12-09 15:28:43.070476127 +0000 UTC m=+1210.225220115 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert") pod "infra-operator-controller-manager-78d48bff9d-sktgh" (UID: "b5a74069-1167-4e2a-a3b2-c11507b121ab") : secret "infra-operator-webhook-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: I1209 15:28:35.069712 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:35 crc kubenswrapper[4716]: I1209 15:28:35.579934 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.580167 4716 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.580262 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert podName:05653cf9-bc33-44bb-acdd-21dd610a7665 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:43.580242117 +0000 UTC m=+1210.734986105 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdbbht" (UID: "05653cf9-bc33-44bb-acdd-21dd610a7665") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: I1209 15:28:35.987540 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:35 crc kubenswrapper[4716]: I1209 15:28:35.987675 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.987799 4716 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.987887 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:43.987869366 +0000 UTC m=+1211.142613354 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "metrics-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.987799 4716 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 15:28:35 crc kubenswrapper[4716]: E1209 15:28:35.987972 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:43.987954489 +0000 UTC m=+1211.142698477 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "webhook-server-cert" not found Dec 09 15:28:43 crc kubenswrapper[4716]: I1209 15:28:43.128507 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:43 crc kubenswrapper[4716]: I1209 15:28:43.134191 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b5a74069-1167-4e2a-a3b2-c11507b121ab-cert\") pod \"infra-operator-controller-manager-78d48bff9d-sktgh\" (UID: \"b5a74069-1167-4e2a-a3b2-c11507b121ab\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:43 crc kubenswrapper[4716]: I1209 15:28:43.198871 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-m6p7d" Dec 09 15:28:43 crc kubenswrapper[4716]: I1209 15:28:43.207586 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:28:43 crc kubenswrapper[4716]: E1209 15:28:43.569016 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59" Dec 09 15:28:43 crc kubenswrapper[4716]: E1209 15:28:43.569236 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-snwqd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-649h2_openstack-operators(d5727f03-aede-454c-8dec-17e10986da51): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:43 crc kubenswrapper[4716]: I1209 15:28:43.636344 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:43 crc kubenswrapper[4716]: E1209 15:28:43.636545 4716 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:43 crc kubenswrapper[4716]: E1209 15:28:43.636640 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert podName:05653cf9-bc33-44bb-acdd-21dd610a7665 nodeName:}" failed. No retries permitted until 2025-12-09 15:28:59.636603884 +0000 UTC m=+1226.791347862 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fdbbht" (UID: "05653cf9-bc33-44bb-acdd-21dd610a7665") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 09 15:28:44 crc kubenswrapper[4716]: I1209 15:28:44.044124 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:44 crc kubenswrapper[4716]: E1209 15:28:44.044385 4716 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 09 15:28:44 crc kubenswrapper[4716]: I1209 15:28:44.044600 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:28:44 crc kubenswrapper[4716]: E1209 15:28:44.044636 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:29:00.044603655 +0000 UTC m=+1227.199347643 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "metrics-server-cert" not found Dec 09 15:28:44 crc kubenswrapper[4716]: E1209 15:28:44.044792 4716 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 09 15:28:44 crc kubenswrapper[4716]: E1209 15:28:44.044865 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs podName:3221ee7d-c104-4055-961a-46cd6ba8c602 nodeName:}" failed. No retries permitted until 2025-12-09 15:29:00.044846292 +0000 UTC m=+1227.199590340 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs") pod "openstack-operator-controller-manager-5955d8c9f-mpmbx" (UID: "3221ee7d-c104-4055-961a-46cd6ba8c602") : secret "webhook-server-cert" not found Dec 09 15:28:44 crc kubenswrapper[4716]: E1209 15:28:44.989713 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429" Dec 09 15:28:44 crc kubenswrapper[4716]: E1209 15:28:44.989966 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qctqv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-2ft2d_openstack-operators(c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:45 crc kubenswrapper[4716]: E1209 15:28:45.772172 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557" Dec 09 15:28:45 crc 
kubenswrapper[4716]: E1209 15:28:45.772382 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jgglz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-qgr4j_openstack-operators(86831782-f8f0-40ad-99f7-4568185065b0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:46 crc kubenswrapper[4716]: E1209 15:28:46.593616 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87" Dec 09 15:28:46 crc kubenswrapper[4716]: E1209 15:28:46.594254 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w8kdh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-967d97867-hq8hn_openstack-operators(695a0bab-c2c8-4c7d-9420-2dc191000e54): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:47 crc kubenswrapper[4716]: I1209 15:28:47.922488 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:28:47 crc kubenswrapper[4716]: I1209 15:28:47.923745 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:28:48 crc kubenswrapper[4716]: E1209 15:28:48.289308 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:900050d3501c0785b227db34b89883efe68247816e5c7427cacb74f8aa10605a" Dec 09 15:28:48 crc kubenswrapper[4716]: E1209 15:28:48.292968 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:900050d3501c0785b227db34b89883efe68247816e5c7427cacb74f8aa10605a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w7q8w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-697fb699cf-p6hrb_openstack-operators(ee92f03c-d515-4aa3-ad3c-cce0c89fd12b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:50 crc kubenswrapper[4716]: E1209 15:28:50.237908 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:5370dc4a8e776923eec00bb50cbdb2e390e9dde50be26bdc04a216bd2d6b5027" Dec 09 15:28:50 crc kubenswrapper[4716]: E1209 15:28:50.238613 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:5370dc4a8e776923eec00bb50cbdb2e390e9dde50be26bdc04a216bd2d6b5027,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: 
{{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x7vkk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-5697bb5779-s7mv7_openstack-operators(b60a4ca3-f2c6-4829-bc4f-d47b0740e378): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:50 crc kubenswrapper[4716]: E1209 15:28:50.739223 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f" Dec 09 15:28:50 crc kubenswrapper[4716]: E1209 15:28:50.739676 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wj2sv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-fwnp7_openstack-operators(43243e09-47d6-428c-adc6-3542056106b5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:52 crc kubenswrapper[4716]: E1209 15:28:52.517369 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991" Dec 09 15:28:52 crc kubenswrapper[4716]: E1209 15:28:52.517755 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:3aa109bb973253ae9dcf339b9b65abbd1176cdb4be672c93e538a5f113816991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mgrxx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-9d58d64bc-h4nlt_openstack-operators(4d332287-0f6e-46d1-9cd9-d31dd855d753): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:53 crc kubenswrapper[4716]: E1209 15:28:53.739118 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8" Dec 09 15:28:53 crc kubenswrapper[4716]: E1209 15:28:53.739327 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-67lgp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-s7dkv_openstack-operators(368b843d-fa53-406c-a5a8-a78a8763718d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:56 crc kubenswrapper[4716]: E1209 15:28:56.227990 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168" Dec 09 15:28:56 crc kubenswrapper[4716]: E1209 15:28:56.228483 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-78lfq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-kgr69_openstack-operators(6890a044-24cb-49b2-9d60-105fa8e57573): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:57 crc kubenswrapper[4716]: E1209 15:28:57.669343 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.136:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1" Dec 09 15:28:57 crc kubenswrapper[4716]: E1209 15:28:57.669721 4716 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.136:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1" Dec 09 15:28:57 crc kubenswrapper[4716]: E1209 15:28:57.669905 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.136:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nc9fz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-5656d9bf69-llf2c_openstack-operators(57b68343-4540-4097-9f68-a538c63bae3b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:59 crc kubenswrapper[4716]: E1209 15:28:59.222165 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670" Dec 09 15:28:59 crc kubenswrapper[4716]: E1209 15:28:59.222645 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s6sk8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-pmct6_openstack-operators(f1029307-bcf9-40c0-b656-b7d203493022): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:28:59 crc kubenswrapper[4716]: E1209 15:28:59.667936 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Dec 09 15:28:59 crc kubenswrapper[4716]: E1209 15:28:59.668322 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rh5qh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-tf7dx_openstack-operators(08b1c536-2d1d-42fc-9e18-87ebe38bed16): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Dec 09 15:28:59 crc kubenswrapper[4716]: E1209 15:28:59.669494 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" podUID="08b1c536-2d1d-42fc-9e18-87ebe38bed16" Dec 09 15:28:59 crc kubenswrapper[4716]: I1209 15:28:59.676992 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:59 crc kubenswrapper[4716]: I1209 15:28:59.687086 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/05653cf9-bc33-44bb-acdd-21dd610a7665-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fdbbht\" (UID: \"05653cf9-bc33-44bb-acdd-21dd610a7665\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:28:59 crc kubenswrapper[4716]: I1209 15:28:59.923021 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-tnkd2" Dec 09 15:28:59 crc kubenswrapper[4716]: I1209 15:28:59.931721 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:29:00 crc kubenswrapper[4716]: I1209 15:29:00.083892 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:00 crc kubenswrapper[4716]: I1209 15:29:00.085099 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:00 crc kubenswrapper[4716]: I1209 15:29:00.087946 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-webhook-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:00 crc kubenswrapper[4716]: I1209 15:29:00.088165 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3221ee7d-c104-4055-961a-46cd6ba8c602-metrics-certs\") pod \"openstack-operator-controller-manager-5955d8c9f-mpmbx\" (UID: \"3221ee7d-c104-4055-961a-46cd6ba8c602\") " pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:00 crc kubenswrapper[4716]: I1209 
15:29:00.131510 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ph5qp" Dec 09 15:29:00 crc kubenswrapper[4716]: I1209 15:29:00.139113 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:00 crc kubenswrapper[4716]: E1209 15:29:00.264812 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7" Dec 09 15:29:00 crc kubenswrapper[4716]: E1209 15:29:00.265019 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q9v2k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-szpds_openstack-operators(436bb095-8486-4d67-9fc5-6596738065cc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:29:00 crc kubenswrapper[4716]: E1209 15:29:00.616422 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" podUID="08b1c536-2d1d-42fc-9e18-87ebe38bed16" Dec 09 15:29:01 crc kubenswrapper[4716]: I1209 15:29:01.397219 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh"] Dec 09 15:29:01 crc kubenswrapper[4716]: I1209 15:29:01.628216 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" event={"ID":"b5a74069-1167-4e2a-a3b2-c11507b121ab","Type":"ContainerStarted","Data":"13ce5e3ab2cc65bf288137d4a1ecfe8ac16544168480c36b46f9d9c8463a9264"} Dec 09 15:29:02 crc kubenswrapper[4716]: I1209 15:29:02.005321 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx"] Dec 09 15:29:02 crc kubenswrapper[4716]: I1209 15:29:02.235753 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht"] Dec 09 15:29:02 crc kubenswrapper[4716]: I1209 15:29:02.643721 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" event={"ID":"33b32911-25d0-45d5-8009-4d9787875e86","Type":"ContainerStarted","Data":"f2d253a07fb444767f4c64e87f30611520af2a2e3db3db47dc1ea07aa8851f51"} Dec 09 15:29:07 crc kubenswrapper[4716]: W1209 15:29:07.309571 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3221ee7d_c104_4055_961a_46cd6ba8c602.slice/crio-47bd74e4cae8f35078e33652982ebd77c6e080c9fd5d95501067b6e9ea0e5e22 WatchSource:0}: Error finding container 47bd74e4cae8f35078e33652982ebd77c6e080c9fd5d95501067b6e9ea0e5e22: Status 404 returned error can't find the container with id 47bd74e4cae8f35078e33652982ebd77c6e080c9fd5d95501067b6e9ea0e5e22 Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.692481 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" event={"ID":"ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2","Type":"ContainerStarted","Data":"6e14c4fb0734dc57815f2ff7400f42cfc0261c7185545e336166782c19c82eb3"} Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.695512 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" event={"ID":"266cbf21-b738-4045-902b-88deadcc5869","Type":"ContainerStarted","Data":"96ef9a095f889c983589a8e5bb5e39670b8f3f6a7858eb9a2951292fc8cdf708"} Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.702385 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" event={"ID":"8da5796c-fddb-414b-8d26-6657d25e6c00","Type":"ContainerStarted","Data":"c47f3d7aac26f070678423ae75d544f95c716833d4aa956435931b4136b98342"} Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.706042 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" event={"ID":"05653cf9-bc33-44bb-acdd-21dd610a7665","Type":"ContainerStarted","Data":"3ec68c6198c35c0b7b1bd450bbce3e4745761d1d5aa3c0284b545501e29a3632"} Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 
15:29:07.717653 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" event={"ID":"3221ee7d-c104-4055-961a-46cd6ba8c602","Type":"ContainerStarted","Data":"d1b4a5d4d1de265b9e5e2fd67ffb4901518505f7c23d0673b33ce36c2f3f32b4"} Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.717702 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" event={"ID":"3221ee7d-c104-4055-961a-46cd6ba8c602","Type":"ContainerStarted","Data":"47bd74e4cae8f35078e33652982ebd77c6e080c9fd5d95501067b6e9ea0e5e22"} Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.718486 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.734263 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms" event={"ID":"87589b07-5c3c-46c9-b84e-ffc2efa3b817","Type":"ContainerStarted","Data":"ac82648ea7e4da74f58ef96f2f32add6071cf7593165adca2b798d6b6b8cdef0"} Dec 09 15:29:07 crc kubenswrapper[4716]: E1209 15:29:07.756435 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" podUID="436bb095-8486-4d67-9fc5-6596738065cc" Dec 09 15:29:07 crc kubenswrapper[4716]: I1209 15:29:07.758075 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" podStartSLOduration=40.758055526 podStartE2EDuration="40.758055526s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:29:07.749831142 +0000 UTC m=+1234.904575130" watchObservedRunningTime="2025-12-09 15:29:07.758055526 +0000 UTC m=+1234.912799514" Dec 09 15:29:07 crc kubenswrapper[4716]: E1209 15:29:07.765841 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" podUID="368b843d-fa53-406c-a5a8-a78a8763718d" Dec 09 15:29:07 crc kubenswrapper[4716]: E1209 15:29:07.767540 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" podUID="b60a4ca3-f2c6-4829-bc4f-d47b0740e378" Dec 09 15:29:07 crc kubenswrapper[4716]: E1209 15:29:07.848022 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" podUID="43243e09-47d6-428c-adc6-3542056106b5" Dec 09 15:29:08 crc kubenswrapper[4716]: E1209 15:29:08.161176 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Dec 09 15:29:08 crc kubenswrapper[4716]: E1209 15:29:08.161413 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fklkt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-dzpdl_openstack-operators(30c82b79-c03d-45d3-8b0a-ca506daf2934): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:29:08 crc kubenswrapper[4716]: E1209 15:29:08.162801 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" podUID="30c82b79-c03d-45d3-8b0a-ca506daf2934" Dec 09 15:29:08 crc kubenswrapper[4716]: E1209 15:29:08.677992 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" podUID="d5727f03-aede-454c-8dec-17e10986da51" Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.749538 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" event={"ID":"368b843d-fa53-406c-a5a8-a78a8763718d","Type":"ContainerStarted","Data":"5dcbd4e319b5cf900dd96d986e6e4a096276351e37e477e4eddc09979f1647fd"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.766249 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" event={"ID":"43243e09-47d6-428c-adc6-3542056106b5","Type":"ContainerStarted","Data":"507e9d041daddbf7f43a26e80403b672d8657e60817bfb373188c88488252e9c"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.779754 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms" event={"ID":"87589b07-5c3c-46c9-b84e-ffc2efa3b817","Type":"ContainerStarted","Data":"eb1a875ce56bb9707e993c33c3532c28e4c182d9cd669ad2e8e678384628050f"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.781277 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms" Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.786277 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" event={"ID":"b60a4ca3-f2c6-4829-bc4f-d47b0740e378","Type":"ContainerStarted","Data":"edcc90f6e898fd3a422bd91e4a9cc1b8269978b5d4320d3057e911e766852923"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.788403 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" event={"ID":"ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2","Type":"ContainerStarted","Data":"fbb766e261f908114f0fd0122c0fc57d71baf8688464a9d75a8ad61c8bd86aff"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.788679 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.791920 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" event={"ID":"436bb095-8486-4d67-9fc5-6596738065cc","Type":"ContainerStarted","Data":"0d50f712f42c927fd7d7c0a150f2da1dca52a99e4238758848f8025bdf9813df"} Dec 09 15:29:08 crc kubenswrapper[4716]: E1209 15:29:08.796856 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" podUID="436bb095-8486-4d67-9fc5-6596738065cc" Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.797636 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" event={"ID":"d5727f03-aede-454c-8dec-17e10986da51","Type":"ContainerStarted","Data":"ffd8f9e1865f92498c469bc634dde5f727c7258cda1ba3e0f7d3b4ce5f63adc8"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.803984 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" event={"ID":"266cbf21-b738-4045-902b-88deadcc5869","Type":"ContainerStarted","Data":"c9e6d500e8facce36277b646b712721cd396271f93821251afdbb1a22dcc6ff6"} Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.804235 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.862412 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms" podStartSLOduration=10.323030193 podStartE2EDuration="41.862389121s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.364553065 +0000 UTC m=+1196.519297053" lastFinishedPulling="2025-12-09 15:29:00.903911983 +0000 UTC 
m=+1228.058655981" observedRunningTime="2025-12-09 15:29:08.852057288 +0000 UTC m=+1236.006801276" watchObservedRunningTime="2025-12-09 15:29:08.862389121 +0000 UTC m=+1236.017133109" Dec 09 15:29:08 crc kubenswrapper[4716]: I1209 15:29:08.905775 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" podStartSLOduration=11.465505425 podStartE2EDuration="41.905759173s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.944140952 +0000 UTC m=+1198.098884940" lastFinishedPulling="2025-12-09 15:29:01.3843947 +0000 UTC m=+1228.539138688" observedRunningTime="2025-12-09 15:29:08.905044072 +0000 UTC m=+1236.059788060" watchObservedRunningTime="2025-12-09 15:29:08.905759173 +0000 UTC m=+1236.060503161" Dec 09 15:29:08 crc kubenswrapper[4716]: E1209 15:29:08.959188 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" podUID="57b68343-4540-4097-9f68-a538c63bae3b" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.020280 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" podUID="695a0bab-c2c8-4c7d-9420-2dc191000e54" Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.069706 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" podStartSLOduration=10.16750389 podStartE2EDuration="42.069684736s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.001706196 +0000 UTC m=+1196.156450184" lastFinishedPulling="2025-12-09 15:29:00.903887042 +0000 UTC m=+1228.058631030" observedRunningTime="2025-12-09 15:29:09.057740567 +0000 UTC m=+1236.212484555" watchObservedRunningTime="2025-12-09 15:29:09.069684736 +0000 UTC m=+1236.224428724" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.492543 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" podUID="c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.535916 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" podUID="ee92f03c-d515-4aa3-ad3c-cce0c89fd12b" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.550177 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" podUID="86831782-f8f0-40ad-99f7-4568185065b0" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.701390 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" podUID="6890a044-24cb-49b2-9d60-105fa8e57573" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.800974 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" podUID="f1029307-bcf9-40c0-b656-b7d203493022" Dec 09 15:29:09 crc kubenswrapper[4716]: E1209 15:29:09.817920 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" podUID="4d332287-0f6e-46d1-9cd9-d31dd855d753" Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.828475 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" event={"ID":"86831782-f8f0-40ad-99f7-4568185065b0","Type":"ContainerStarted","Data":"7774d28a305863c784a6c67e82457fb9721d0b1b0343a4a27b1f95c706ee416a"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.843248 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" event={"ID":"f1029307-bcf9-40c0-b656-b7d203493022","Type":"ContainerStarted","Data":"7f90625a291ec8c6c15f55a958ef1a43a9d377ce8045610fd016ddd9948cf80b"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.856238 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" event={"ID":"33b32911-25d0-45d5-8009-4d9787875e86","Type":"ContainerStarted","Data":"e578cfc0a969595c24a8429eb8c35aeb3cfc780a5500c2d0d1c97509dc8700b8"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.857409 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.867006 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" event={"ID":"695a0bab-c2c8-4c7d-9420-2dc191000e54","Type":"ContainerStarted","Data":"dd3034865d1c917f53a11c92c9fab8e39331e1d26a9de9a962ff505823bcec34"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.868857 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.875850 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" event={"ID":"4d332287-0f6e-46d1-9cd9-d31dd855d753","Type":"ContainerStarted","Data":"147fd441add38884c9e6feae382ea5cfaa1a811a22cccaba56042338d8514f0c"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.895367 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" event={"ID":"8da5796c-fddb-414b-8d26-6657d25e6c00","Type":"ContainerStarted","Data":"385c7eace35134525077bfe689e613cf2cf20d7c254b6839f7a558aeffec10e9"} Dec 09 15:29:09 crc 
kubenswrapper[4716]: I1209 15:29:09.897009 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.928304 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" event={"ID":"6890a044-24cb-49b2-9d60-105fa8e57573","Type":"ContainerStarted","Data":"9b563523ddd2bf1c26516d2a4f01bb60f9d52d48c3ac1ed4d454d536967579ad"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.950948 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" event={"ID":"ee92f03c-d515-4aa3-ad3c-cce0c89fd12b","Type":"ContainerStarted","Data":"adb1b2639e8fddb9e7611ac38630d0b8a77d6db93dd2248aa26e2e92abdcea9b"} Dec 09 15:29:09 crc kubenswrapper[4716]: I1209 15:29:09.987268 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" event={"ID":"c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489","Type":"ContainerStarted","Data":"f2da1005adbf7dccc0e126cefb4140b12163b0a4351d0a143a390b752aa946f5"} Dec 09 15:29:10 crc kubenswrapper[4716]: I1209 15:29:10.020556 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" event={"ID":"57b68343-4540-4097-9f68-a538c63bae3b","Type":"ContainerStarted","Data":"8559fe527661c7c26ae008c8f6f28eef8abbfeada84e7f12a8f290d6e7060923"} Dec 09 15:29:10 crc kubenswrapper[4716]: I1209 15:29:10.026290 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" podStartSLOduration=4.418555178 podStartE2EDuration="43.026264268s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.422419138 +0000 UTC m=+1196.577163126" lastFinishedPulling="2025-12-09 15:29:08.030128238 +0000 UTC m=+1235.184872216" observedRunningTime="2025-12-09 15:29:10.006875298 +0000 UTC m=+1237.161619286" watchObservedRunningTime="2025-12-09 15:29:10.026264268 +0000 UTC m=+1237.181008256" Dec 09 15:29:10 crc kubenswrapper[4716]: E1209 15:29:10.048876 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" podUID="436bb095-8486-4d67-9fc5-6596738065cc" Dec 09 15:29:10 crc kubenswrapper[4716]: I1209 15:29:10.159745 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" podStartSLOduration=12.111780268 podStartE2EDuration="43.159727507s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.856010745 +0000 UTC m=+1197.010754733" lastFinishedPulling="2025-12-09 15:29:00.903957984 +0000 UTC m=+1228.058701972" observedRunningTime="2025-12-09 15:29:10.114301707 +0000 UTC m=+1237.269045695" watchObservedRunningTime="2025-12-09 15:29:10.159727507 +0000 UTC m=+1237.314471495" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.068125 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" event={"ID":"d5727f03-aede-454c-8dec-17e10986da51","Type":"ContainerStarted","Data":"7bbefb2500356484b961d291155735e0fd1024a6eeafef4526499e943ec66a88"} Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.068613 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.073222 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" event={"ID":"368b843d-fa53-406c-a5a8-a78a8763718d","Type":"ContainerStarted","Data":"fcd97f16d053340f9c97ad39cacf893bb0f0751e8583dca24d2b5465d8583a92"} Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.074142 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.081792 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" event={"ID":"43243e09-47d6-428c-adc6-3542056106b5","Type":"ContainerStarted","Data":"66723b9d4f9aa36768831e9978ff5f9a68016d8077ac267c7a1d7a1a4f15f2dc"} Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.082043 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.088647 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" event={"ID":"57b68343-4540-4097-9f68-a538c63bae3b","Type":"ContainerStarted","Data":"99449cb2a8128319fb287e74193d8a8ff56b466c73479a371d145356732e7387"} Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.088890 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.092978 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" event={"ID":"b60a4ca3-f2c6-4829-bc4f-d47b0740e378","Type":"ContainerStarted","Data":"e68d4157396e6cd9cf38472f4f6146c67b2e5de6383566a0cf3c48db7d02ec5c"} Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.093016 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.126050 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" podStartSLOduration=5.582584699 podStartE2EDuration="44.126024005s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.898000222 +0000 UTC m=+1198.052744210" lastFinishedPulling="2025-12-09 15:29:09.441439528 +0000 UTC m=+1236.596183516" observedRunningTime="2025-12-09 15:29:11.084891108 +0000 UTC m=+1238.239635096" watchObservedRunningTime="2025-12-09 15:29:11.126024005 +0000 UTC m=+1238.280767993" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.137805 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" 
podStartSLOduration=5.349291787 podStartE2EDuration="44.137576683s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.941996821 +0000 UTC m=+1198.096740809" lastFinishedPulling="2025-12-09 15:29:09.730281727 +0000 UTC m=+1236.885025705" observedRunningTime="2025-12-09 15:29:11.110033881 +0000 UTC m=+1238.264777869" watchObservedRunningTime="2025-12-09 15:29:11.137576683 +0000 UTC m=+1238.292320671" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.162087 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" podStartSLOduration=4.725003467 podStartE2EDuration="44.162061558s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.88718887 +0000 UTC m=+1197.041932858" lastFinishedPulling="2025-12-09 15:29:09.324246971 +0000 UTC m=+1236.478990949" observedRunningTime="2025-12-09 15:29:11.132162139 +0000 UTC m=+1238.286906137" watchObservedRunningTime="2025-12-09 15:29:11.162061558 +0000 UTC m=+1238.316805546" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.187058 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" podStartSLOduration=4.638331547 podStartE2EDuration="44.187035977s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.893770087 +0000 UTC m=+1197.048514075" lastFinishedPulling="2025-12-09 15:29:09.442474517 +0000 UTC m=+1236.597218505" observedRunningTime="2025-12-09 15:29:11.161315547 +0000 UTC m=+1238.316059525" watchObservedRunningTime="2025-12-09 15:29:11.187035977 +0000 UTC m=+1238.341779965" Dec 09 15:29:11 crc kubenswrapper[4716]: I1209 15:29:11.196031 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" podStartSLOduration=4.864395433 podStartE2EDuration="44.196016012s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.839840281 +0000 UTC m=+1197.994584269" lastFinishedPulling="2025-12-09 15:29:10.17146086 +0000 UTC m=+1237.326204848" observedRunningTime="2025-12-09 15:29:11.187714506 +0000 UTC m=+1238.342458494" watchObservedRunningTime="2025-12-09 15:29:11.196016012 +0000 UTC m=+1238.350760000" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.178521 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" event={"ID":"6890a044-24cb-49b2-9d60-105fa8e57573","Type":"ContainerStarted","Data":"31bb857cf039e053f081dacb3a93a9ada1f0e4cbc17875b63c668431cdb6e5c5"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.179696 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.241481 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" event={"ID":"c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489","Type":"ContainerStarted","Data":"8570e0820f9f7c18e126658eaa258f3c8069a9b861d097e1f6aaaf9028c829eb"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.242799 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" Dec 09 
15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.248702 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" podStartSLOduration=5.519765575 podStartE2EDuration="45.248679581s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.943975127 +0000 UTC m=+1198.098719115" lastFinishedPulling="2025-12-09 15:29:10.672889133 +0000 UTC m=+1237.827633121" observedRunningTime="2025-12-09 15:29:12.229035133 +0000 UTC m=+1239.383779121" watchObservedRunningTime="2025-12-09 15:29:12.248679581 +0000 UTC m=+1239.403423569" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.269953 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" event={"ID":"695a0bab-c2c8-4c7d-9420-2dc191000e54","Type":"ContainerStarted","Data":"fb0cefabbd6283898857817184a153c8a7a1153ff32210684fa3a1079e5913e7"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.270895 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.275699 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" podStartSLOduration=3.966358841 podStartE2EDuration="45.275676027s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.359903823 +0000 UTC m=+1196.514647811" lastFinishedPulling="2025-12-09 15:29:10.669221009 +0000 UTC m=+1237.823964997" observedRunningTime="2025-12-09 15:29:12.271145528 +0000 UTC m=+1239.425889516" watchObservedRunningTime="2025-12-09 15:29:12.275676027 +0000 UTC m=+1239.430420015" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.297369 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" event={"ID":"4d332287-0f6e-46d1-9cd9-d31dd855d753","Type":"ContainerStarted","Data":"54ca97ec97afe0b7e7aca46919911d958f4876604dc81045d05f12f0cd154ecc"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.297739 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.324211 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" podStartSLOduration=5.540121282 podStartE2EDuration="45.324195544s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.883213562 +0000 UTC m=+1198.037957550" lastFinishedPulling="2025-12-09 15:29:10.667287824 +0000 UTC m=+1237.822031812" observedRunningTime="2025-12-09 15:29:12.322123585 +0000 UTC m=+1239.476867583" watchObservedRunningTime="2025-12-09 15:29:12.324195544 +0000 UTC m=+1239.478939532" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.339939 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" event={"ID":"86831782-f8f0-40ad-99f7-4568185065b0","Type":"ContainerStarted","Data":"f56d8ec5985c45bccd56c17db01213e8283d5a46da43f267b8b9f4dab8ef238c"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.340925 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.364514 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" event={"ID":"f1029307-bcf9-40c0-b656-b7d203493022","Type":"ContainerStarted","Data":"a616c3d15bc8cb9d92d54c509c5bfb9d27c76e36e1e2bd0e4664c339b4364d3c"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.364609 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.373154 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" event={"ID":"ee92f03c-d515-4aa3-ad3c-cce0c89fd12b","Type":"ContainerStarted","Data":"5d692f865a1943dd375f7a01f8f60c156b1ba7d6b76d9d9cf0c82bae31ad4cc7"} Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.373191 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.378664 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" podStartSLOduration=5.531727635 podStartE2EDuration="45.378619079s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.813117863 +0000 UTC m=+1197.967861851" lastFinishedPulling="2025-12-09 15:29:10.660009307 +0000 UTC m=+1237.814753295" observedRunningTime="2025-12-09 15:29:12.367952636 +0000 UTC m=+1239.522696624" watchObservedRunningTime="2025-12-09 15:29:12.378619079 +0000 UTC m=+1239.533363067" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.432038 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" podStartSLOduration=5.860917239 podStartE2EDuration="45.432010825s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.942066323 +0000 UTC m=+1198.096810311" lastFinishedPulling="2025-12-09 15:29:10.513159909 +0000 UTC m=+1237.667903897" observedRunningTime="2025-12-09 15:29:12.38923883 +0000 UTC m=+1239.543982818" watchObservedRunningTime="2025-12-09 15:29:12.432010825 +0000 UTC m=+1239.586754813" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.459211 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" podStartSLOduration=4.681715917 podStartE2EDuration="45.459181986s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:29.872368409 +0000 UTC m=+1197.027112397" lastFinishedPulling="2025-12-09 15:29:10.649834478 +0000 UTC m=+1237.804578466" observedRunningTime="2025-12-09 15:29:12.453792023 +0000 UTC m=+1239.608536011" watchObservedRunningTime="2025-12-09 15:29:12.459181986 +0000 UTC m=+1239.613925974" Dec 09 15:29:12 crc kubenswrapper[4716]: I1209 15:29:12.508865 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" podStartSLOduration=4.911383756 podStartE2EDuration="45.508837575s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 
15:28:30.070551655 +0000 UTC m=+1197.225295643" lastFinishedPulling="2025-12-09 15:29:10.668005474 +0000 UTC m=+1237.822749462" observedRunningTime="2025-12-09 15:29:12.487702555 +0000 UTC m=+1239.642446543" watchObservedRunningTime="2025-12-09 15:29:12.508837575 +0000 UTC m=+1239.663581563" Dec 09 15:29:13 crc kubenswrapper[4716]: I1209 15:29:13.384364 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" event={"ID":"08b1c536-2d1d-42fc-9e18-87ebe38bed16","Type":"ContainerStarted","Data":"e098f23a7b8aff699ac13299d0cdaf9e61e9af0952bd33f1bae98745d61e3f95"} Dec 09 15:29:13 crc kubenswrapper[4716]: I1209 15:29:13.408741 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-tf7dx" podStartSLOduration=5.224112593 podStartE2EDuration="46.408716618s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.836204148 +0000 UTC m=+1197.990948136" lastFinishedPulling="2025-12-09 15:29:12.020808173 +0000 UTC m=+1239.175552161" observedRunningTime="2025-12-09 15:29:13.40278851 +0000 UTC m=+1240.557532508" watchObservedRunningTime="2025-12-09 15:29:13.408716618 +0000 UTC m=+1240.563460606" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.395542 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.425806 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" event={"ID":"30c82b79-c03d-45d3-8b0a-ca506daf2934","Type":"ContainerStarted","Data":"08010d718ed0c04fe624aa057b1e388151bb37b3ba63ad722633d411dca2da5d"} Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.426181 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" event={"ID":"30c82b79-c03d-45d3-8b0a-ca506daf2934","Type":"ContainerStarted","Data":"dca2173d680c021572c4cff229e9a7e4de702be17d1a64b9973e5eb3e4f0e7fa"} Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.427509 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.436257 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" event={"ID":"b5a74069-1167-4e2a-a3b2-c11507b121ab","Type":"ContainerStarted","Data":"b5aff09be8835efc5de26948677fc6147b159bd5b2ee73307d7029a32455d97d"} Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.437330 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.447289 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" podStartSLOduration=4.486872577 podStartE2EDuration="50.447271821s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.947983901 +0000 UTC m=+1198.102727889" lastFinishedPulling="2025-12-09 15:29:16.908383145 +0000 UTC m=+1244.063127133" observedRunningTime="2025-12-09 15:29:17.446589492 +0000 UTC m=+1244.601333480" 
watchObservedRunningTime="2025-12-09 15:29:17.447271821 +0000 UTC m=+1244.602015809" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.448288 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.472307 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" event={"ID":"05653cf9-bc33-44bb-acdd-21dd610a7665","Type":"ContainerStarted","Data":"6e7e88bd1cbe68acb00cd7b2862ef2b05e5fc5ad44c28c0ec0e79680f631127b"} Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.482695 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" podStartSLOduration=35.096364649 podStartE2EDuration="50.482673856s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:29:01.512591479 +0000 UTC m=+1228.667335467" lastFinishedPulling="2025-12-09 15:29:16.898900696 +0000 UTC m=+1244.053644674" observedRunningTime="2025-12-09 15:29:17.472724854 +0000 UTC m=+1244.627468842" watchObservedRunningTime="2025-12-09 15:29:17.482673856 +0000 UTC m=+1244.637417844" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.507728 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-s7mv7" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.536902 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-2ft2d" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.577091 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-9d9ms" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.582515 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.922798 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.922863 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.962301 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-5pz92" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.970227 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-pmct6" Dec 09 15:29:17 crc kubenswrapper[4716]: I1209 15:29:17.995262 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-qgr4j" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.113823 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-kgr69" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.159591 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.179874 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-fwnp7" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.206434 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-h4nlt" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.247479 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5656d9bf69-llf2c" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.345526 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-kspms" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.394285 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-s7dkv" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.481540 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" event={"ID":"b5a74069-1167-4e2a-a3b2-c11507b121ab","Type":"ContainerStarted","Data":"cd5df0d2f25f9d51e82d082bc5d0d08c49cc09b85ecbe11eea4206bf6bc8b1dc"} Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.487123 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" event={"ID":"05653cf9-bc33-44bb-acdd-21dd610a7665","Type":"ContainerStarted","Data":"4fb8a08f8d925182509753a304db78d7448059de570d3b0b3dcd5baf9edb09b4"} Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.487275 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:29:18 crc kubenswrapper[4716]: I1209 15:29:18.528924 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" podStartSLOduration=41.930066923 podStartE2EDuration="51.528904084s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:29:07.303193554 +0000 UTC m=+1234.457937542" lastFinishedPulling="2025-12-09 15:29:16.902030715 +0000 UTC m=+1244.056774703" observedRunningTime="2025-12-09 15:29:18.522151592 +0000 UTC m=+1245.676895580" watchObservedRunningTime="2025-12-09 15:29:18.528904084 +0000 UTC m=+1245.683648072" Dec 09 15:29:20 crc kubenswrapper[4716]: I1209 15:29:20.146400 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5955d8c9f-mpmbx" Dec 09 15:29:23 crc kubenswrapper[4716]: I1209 15:29:23.225318 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-sktgh" Dec 09 15:29:25 crc kubenswrapper[4716]: I1209 15:29:25.540241 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" event={"ID":"436bb095-8486-4d67-9fc5-6596738065cc","Type":"ContainerStarted","Data":"5a570f83a99cbd9b4b8455156526156f8a62e829361717bba1fe823b126f8cb5"} Dec 09 15:29:25 crc kubenswrapper[4716]: I1209 15:29:25.541380 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" Dec 09 15:29:25 crc kubenswrapper[4716]: I1209 15:29:25.560398 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" podStartSLOduration=4.829848692 podStartE2EDuration="58.56038123s" podCreationTimestamp="2025-12-09 15:28:27 +0000 UTC" firstStartedPulling="2025-12-09 15:28:30.943655328 +0000 UTC m=+1198.098399316" lastFinishedPulling="2025-12-09 15:29:24.674187866 +0000 UTC m=+1251.828931854" observedRunningTime="2025-12-09 15:29:25.55577624 +0000 UTC m=+1252.710520228" watchObservedRunningTime="2025-12-09 15:29:25.56038123 +0000 UTC m=+1252.715125218" Dec 09 15:29:27 crc kubenswrapper[4716]: I1209 15:29:27.603272 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" Dec 09 15:29:29 crc kubenswrapper[4716]: I1209 15:29:29.940102 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fdbbht" Dec 09 15:29:37 crc kubenswrapper[4716]: I1209 15:29:37.752574 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-szpds" Dec 09 15:29:47 crc kubenswrapper[4716]: I1209 15:29:47.922525 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:29:47 crc kubenswrapper[4716]: I1209 15:29:47.924289 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:29:47 crc kubenswrapper[4716]: I1209 15:29:47.924457 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:29:47 crc kubenswrapper[4716]: I1209 15:29:47.925809 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:29:47 crc kubenswrapper[4716]: I1209 15:29:47.925962 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" 
podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1" gracePeriod=600 Dec 09 15:29:48 crc kubenswrapper[4716]: E1209 15:29:48.262764 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd92cd91c_19c2_4865_a522_6d1e3a4cd6a5.slice/crio-conmon-51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:29:48 crc kubenswrapper[4716]: E1209 15:29:48.262950 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd92cd91c_19c2_4865_a522_6d1e3a4cd6a5.slice/crio-conmon-51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:29:48 crc kubenswrapper[4716]: I1209 15:29:48.739337 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1" exitCode=0 Dec 09 15:29:48 crc kubenswrapper[4716]: I1209 15:29:48.739387 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1"} Dec 09 15:29:48 crc kubenswrapper[4716]: I1209 15:29:48.739760 4716 scope.go:117] "RemoveContainer" containerID="34321690e545d4c8bbe42a5bb706305ba7bf7764ef04b070ddeba60a6895e655" Dec 09 15:29:49 crc kubenswrapper[4716]: I1209 15:29:49.751017 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"86592a964b516fe613abb12daefa0047ee74a39b779e6980fea7c1589b5faf81"} Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.296182 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rhfr8"] Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.298248 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.300687 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.300940 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.301253 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-tn9fd" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.312458 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.341509 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rhfr8"] Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.552051 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-config\") pod \"dnsmasq-dns-675f4bcbfc-rhfr8\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.552207 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtz9x\" (UniqueName: \"kubernetes.io/projected/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-kube-api-access-gtz9x\") pod \"dnsmasq-dns-675f4bcbfc-rhfr8\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.586716 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-p98qb"] Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.588497 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.591939 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.625167 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-p98qb"] Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.653161 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtz9x\" (UniqueName: \"kubernetes.io/projected/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-kube-api-access-gtz9x\") pod \"dnsmasq-dns-675f4bcbfc-rhfr8\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.653239 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.653282 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-config\") pod \"dnsmasq-dns-675f4bcbfc-rhfr8\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.653326 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmgrp\" (UniqueName: \"kubernetes.io/projected/72871dec-a60a-4e54-86a0-be8f91c3fde3-kube-api-access-qmgrp\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.653362 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-config\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.654226 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-config\") pod \"dnsmasq-dns-675f4bcbfc-rhfr8\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.706095 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtz9x\" (UniqueName: \"kubernetes.io/projected/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-kube-api-access-gtz9x\") pod \"dnsmasq-dns-675f4bcbfc-rhfr8\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.754555 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 
15:29:56.754652 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmgrp\" (UniqueName: \"kubernetes.io/projected/72871dec-a60a-4e54-86a0-be8f91c3fde3-kube-api-access-qmgrp\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.754706 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-config\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.755702 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.755750 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-config\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.776611 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmgrp\" (UniqueName: \"kubernetes.io/projected/72871dec-a60a-4e54-86a0-be8f91c3fde3-kube-api-access-qmgrp\") pod \"dnsmasq-dns-78dd6ddcc-p98qb\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.914050 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" Dec 09 15:29:56 crc kubenswrapper[4716]: I1209 15:29:56.919889 4716 util.go:30] "No sandbox for pod can be found. 
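Each volume in the dnsmasq pods above passes through the reconciler's three logged stages: "operationExecutor.VerifyControllerAttachedVolume started" (reconciler_common.go:245), "operationExecutor.MountVolume started" (reconciler_common.go:218), then "MountVolume.SetUp succeeded" (operation_generator.go:637). A sketch that reconstructs the per-volume progression from a log like this (assumes one entry per line and the backslash-escaped quoting shown in this excerpt):

import re, sys
from collections import defaultdict

# The quotes inside klog messages arrive backslash-escaped, hence \\" below.
stage_re = re.compile(
    r'"(?:operationExecutor\.)?(VerifyControllerAttachedVolume started'
    r'|MountVolume started|MountVolume\.SetUp succeeded) for volume'
    r' \\"([^"]+)\\".*?pod="([^"]+)"')

stages = defaultdict(list)
for line in sys.stdin:
    if m := stage_re.search(line):
        stages[(m.group(3), m.group(2))].append(m.group(1))

for (pod, volume), seen in sorted(stages.items()):
    print(f"{pod} {volume}: {' -> '.join(seen)}")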
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" Dec 09 15:29:57 crc kubenswrapper[4716]: I1209 15:29:57.694564 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rhfr8"] Dec 09 15:29:57 crc kubenswrapper[4716]: I1209 15:29:57.711049 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-p98qb"] Dec 09 15:29:57 crc kubenswrapper[4716]: I1209 15:29:57.878119 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" event={"ID":"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d","Type":"ContainerStarted","Data":"6300caae40fb38bcb4696a7e5f12a69bca2aff8151bb1e78355498dd208f0ee5"} Dec 09 15:29:57 crc kubenswrapper[4716]: I1209 15:29:57.879416 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" event={"ID":"72871dec-a60a-4e54-86a0-be8f91c3fde3","Type":"ContainerStarted","Data":"23e5539cdddcae1c1046e5bbca2db657f5586e731f10ef44614446b39689008d"} Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.657589 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rhfr8"] Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.681025 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-h54jn"] Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.684198 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.740457 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-h54jn"] Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.775337 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-config\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.775400 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-dns-svc\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.775450 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4jqq\" (UniqueName: \"kubernetes.io/projected/d166dd61-0ac4-4e96-88c6-59ac154db496-kube-api-access-v4jqq\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.880729 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-config\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.880826 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-dns-svc\") pod 
\"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.881038 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4jqq\" (UniqueName: \"kubernetes.io/projected/d166dd61-0ac4-4e96-88c6-59ac154db496-kube-api-access-v4jqq\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.881914 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-config\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.882388 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-dns-svc\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:58 crc kubenswrapper[4716]: I1209 15:29:58.931117 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4jqq\" (UniqueName: \"kubernetes.io/projected/d166dd61-0ac4-4e96-88c6-59ac154db496-kube-api-access-v4jqq\") pod \"dnsmasq-dns-666b6646f7-h54jn\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") " pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.032640 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-h54jn" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.600762 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-p98qb"] Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.607732 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-784v8"] Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.609828 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.857533 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-config\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.857951 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.858130 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh6lb\" (UniqueName: \"kubernetes.io/projected/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-kube-api-access-vh6lb\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.905579 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-784v8"] Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.959126 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.959231 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh6lb\" (UniqueName: \"kubernetes.io/projected/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-kube-api-access-vh6lb\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.959289 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-config\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.960229 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-config\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.964787 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.978270 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.980650 4716 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.986863 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.987276 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 09 15:29:59 crc kubenswrapper[4716]: I1209 15:29:59.996543 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-t42b4" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.002934 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.005981 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.008469 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.008756 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.024750 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh6lb\" (UniqueName: \"kubernetes.io/projected/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-kube-api-access-vh6lb\") pod \"dnsmasq-dns-57d769cc4f-784v8\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") " pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.034002 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.269422 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271093 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271172 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271202 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2e140762-44f7-46f9-9bbe-a8f780186869-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271233 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271266 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-config-data\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271292 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271361 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271386 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271421 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: 
I1209 15:30:00.271451 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25bbc\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-kube-api-access-25bbc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.271487 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2e140762-44f7-46f9-9bbe-a8f780186869-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.344983 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt"] Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.346999 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.365226 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.365440 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373225 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3cb80e47-ed2e-48c3-8332-346daa870065-config-volume\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373276 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3cb80e47-ed2e-48c3-8332-346daa870065-secret-volume\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373336 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373360 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373394 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " 
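The Job suffix in collect-profiles-29421570-smgjt looks like the CronJob controller's scheduled-time encoding, which to my knowledge is minutes since the Unix epoch; decoding it lands exactly on the 15:30:00 scheduling tick at which the pod was ADDed above:

from datetime import datetime, timezone

suffix = 29421570  # from collect-profiles-29421570-smgjt
print(datetime.fromtimestamp(suffix * 60, tz=timezone.utc))
# 2025-12-09 15:30:00+00:00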
pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373439 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25bbc\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-kube-api-access-25bbc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373475 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2e140762-44f7-46f9-9bbe-a8f780186869-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373515 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8pw9\" (UniqueName: \"kubernetes.io/projected/3cb80e47-ed2e-48c3-8332-346daa870065-kube-api-access-g8pw9\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373664 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373696 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373730 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2e140762-44f7-46f9-9bbe-a8f780186869-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.374743 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.375010 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.376147 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.380364 4716 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.373767 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.383153 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-config-data\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.383210 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.384615 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-config-data\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.386019 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.387577 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2e140762-44f7-46f9-9bbe-a8f780186869-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.400889 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.405494 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt"] Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.424455 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2e140762-44f7-46f9-9bbe-a8f780186869-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.443085 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-tls\") pod 
\"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.459513 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.469228 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25bbc\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-kube-api-access-25bbc\") pod \"rabbitmq-server-0\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.507726 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3cb80e47-ed2e-48c3-8332-346daa870065-config-volume\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.507832 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3cb80e47-ed2e-48c3-8332-346daa870065-secret-volume\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.508126 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8pw9\" (UniqueName: \"kubernetes.io/projected/3cb80e47-ed2e-48c3-8332-346daa870065-kube-api-access-g8pw9\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.670690 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3cb80e47-ed2e-48c3-8332-346daa870065-config-volume\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.675476 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.681134 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3cb80e47-ed2e-48c3-8332-346daa870065-secret-volume\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.711012 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8pw9\" (UniqueName: \"kubernetes.io/projected/3cb80e47-ed2e-48c3-8332-346daa870065-kube-api-access-g8pw9\") pod \"collect-profiles-29421570-smgjt\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:00 crc kubenswrapper[4716]: I1209 15:30:00.802579 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.302817 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.305070 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.330089 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.330427 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.330981 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.331158 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-b9frm" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.331424 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.331790 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.332093 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 09 15:30:01 crc kubenswrapper[4716]: I1209 15:30:01.334397 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.022221 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.024797 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p9l2\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-kube-api-access-7p9l2\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.024919 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025013 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025082 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f417726f-0022-42f5-bfe8-79f6605d557c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025114 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025209 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f417726f-0022-42f5-bfe8-79f6605d557c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025323 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025389 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025412 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.025447 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-confd\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.035738 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-h54jn"] Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.086937 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.089564 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.097803 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-t92nz" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.100207 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.101396 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.109804 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.130956 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131026 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f417726f-0022-42f5-bfe8-79f6605d557c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131049 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131121 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f417726f-0022-42f5-bfe8-79f6605d557c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131186 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131226 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131258 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131276 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131402 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131481 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p9l2\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-kube-api-access-7p9l2\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.131509 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.132914 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.139293 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.143710 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.145202 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.147326 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.152832 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.154986 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.166093 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.177598 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p9l2\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-kube-api-access-7p9l2\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.183646 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.187021 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f417726f-0022-42f5-bfe8-79f6605d557c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.192135 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.213976 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f417726f-0022-42f5-bfe8-79f6605d557c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.238879 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0be2722-84d6-4885-80bc-a795d7f2c05e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.238970 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " 
pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.239132 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0be2722-84d6-4885-80bc-a795d7f2c05e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.239176 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-config-data-default\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.239199 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.239221 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-kolla-config\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.239314 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gc5f\" (UniqueName: \"kubernetes.io/projected/b0be2722-84d6-4885-80bc-a795d7f2c05e-kube-api-access-7gc5f\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.239517 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b0be2722-84d6-4885-80bc-a795d7f2c05e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.277364 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-h54jn" event={"ID":"d166dd61-0ac4-4e96-88c6-59ac154db496","Type":"ContainerStarted","Data":"5170dcf69e7061f2c122ef73bb6d5f7c56addb36e426e8532d5d7091c64aab24"} Dec 09 15:30:02 crc kubenswrapper[4716]: I1209 15:30:02.280027 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.337044 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.355401 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b0be2722-84d6-4885-80bc-a795d7f2c05e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.355560 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0be2722-84d6-4885-80bc-a795d7f2c05e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.355930 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b0be2722-84d6-4885-80bc-a795d7f2c05e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.360977 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.361182 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0be2722-84d6-4885-80bc-a795d7f2c05e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.361229 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-config-data-default\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.361243 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.361266 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-kolla-config\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.361373 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gc5f\" (UniqueName: \"kubernetes.io/projected/b0be2722-84d6-4885-80bc-a795d7f2c05e-kube-api-access-7gc5f\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.362280 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.370637 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0be2722-84d6-4885-80bc-a795d7f2c05e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.371560 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-config-data-default\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.373291 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.373861 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b0be2722-84d6-4885-80bc-a795d7f2c05e-kolla-config\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.374260 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0be2722-84d6-4885-80bc-a795d7f2c05e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.448696 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gc5f\" (UniqueName: \"kubernetes.io/projected/b0be2722-84d6-4885-80bc-a795d7f2c05e-kube-api-access-7gc5f\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.473872 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"b0be2722-84d6-4885-80bc-a795d7f2c05e\") " pod="openstack/openstack-galera-0" Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.661468 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-784v8"] Dec 09 15:30:03 crc kubenswrapper[4716]: I1209 15:30:03.667238 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Dec 09 15:30:04 crc kubenswrapper[4716]: I1209 15:30:04.199451 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:30:04 crc kubenswrapper[4716]: I1209 15:30:04.213309 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt"] Dec 09 15:30:04 crc kubenswrapper[4716]: I1209 15:30:04.506070 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2e140762-44f7-46f9-9bbe-a8f780186869","Type":"ContainerStarted","Data":"011f85a5a25660be9efc49e788d83e16413bbfe1c4dc8846aef954335394c58e"} Dec 09 15:30:05 crc kubenswrapper[4716]: I1209 15:30:05.215859 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" event={"ID":"3cb80e47-ed2e-48c3-8332-346daa870065","Type":"ContainerStarted","Data":"e5d48bc9e6ca746999a5a8d822d37f25bd16bc5d0cd204e66d3502a97d0ef716"} Dec 09 15:30:05 crc kubenswrapper[4716]: I1209 15:30:05.293895 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" event={"ID":"5e40d00b-b8ee-4cf9-859a-5e92c49182c6","Type":"ContainerStarted","Data":"c194aaf7b53f06fad91561f99ee5d6a842e6b1753388cf7b52f40e62557e5e3e"} Dec 09 15:30:05 crc kubenswrapper[4716]: I1209 15:30:05.293946 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:08.733922 4716 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nt28v container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:08.734538 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" podUID="f08b8fc4-fca7-4a7e-b42d-97f41d3cd136" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:08.734991 4716 patch_prober.go:28] interesting pod/openshift-kube-scheduler-crc container/kube-scheduler namespace/openshift-kube-scheduler: Liveness probe status=failure output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:08.735045 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podUID="3dcd261975c3d6b9a6ad6367fd4facd3" containerName="kube-scheduler" probeResult="failure" output="Get \"https://192.168.126.11:10259/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:08.735198 4716 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nt28v container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting 
headers)" start-of-body= Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:08.735248 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" podUID="f08b8fc4-fca7-4a7e-b42d-97f41d3cd136" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:09.363625 4716 patch_prober.go:28] interesting pod/dns-default-pvl7k container/dns namespace/openshift-dns: Readiness probe status=failure output="Get \"http://10.217.0.23:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:09.364510 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-dns/dns-default-pvl7k" podUID="103c6227-5dc6-4322-ae14-201ad9e08295" containerName="dns" probeResult="failure" output="Get \"http://10.217.0.23:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:09.364520 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" podUID="266cbf21-b738-4045-902b-88deadcc5869" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:09 crc kubenswrapper[4716]: I1209 15:30:09.364267 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-xzp8n" podUID="266cbf21-b738-4045-902b-88deadcc5869" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.101:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.048761 4716 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-nt28v container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.14:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.346928 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-65998d95b4-8s5d4" podUID="2a6158db-9b91-48c4-a8fb-610541de1ebe" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.94:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.359551 4716 patch_prober.go:28] interesting pod/logging-loki-gateway-58996586f7-7vqxj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.359618 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" podUID="f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled while 
waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.049318 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-nt28v" podUID="f08b8fc4-fca7-4a7e-b42d-97f41d3cd136" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.14:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.379043 4716 patch_prober.go:28] interesting pod/logging-loki-gateway-58996586f7-7vqxj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.379108 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" podUID="f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.379970 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.645938541s: [/var/lib/containers/storage/overlay/f4554ba19e576e25423f7d4bcef89f77155d79540abf89a35785b04d87e6725b/diff /var/log/pods/openstack-operators_openstack-operator-controller-manager-5955d8c9f-mpmbx_3221ee7d-c104-4055-961a-46cd6ba8c602/manager/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.408191 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.048740449s: [/var/lib/containers/storage/overlay/f1ea02bdde1481a2569b6705e0381242fd1d48f0738a8afb323576bb0c6d4736/diff /var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-h4nlt_4d332287-0f6e-46d1-9cd9-d31dd855d753/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.422300 4716 patch_prober.go:28] interesting pod/prometheus-operator-admission-webhook-f54c54754-kzp7z container/prometheus-operator-admission-webhook namespace/openshift-monitoring: Liveness probe status=failure output="Get \"https://10.217.0.71:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.422358 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-kzp7z" podUID="921a9f99-8a08-4f4b-8c55-6221e69d5356" containerName="prometheus-operator-admission-webhook" probeResult="failure" output="Get \"https://10.217.0.71:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.422557 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" podUID="d5727f03-aede-454c-8dec-17e10986da51" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting 
headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.422673 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-649h2" podUID="d5727f03-aede-454c-8dec-17e10986da51" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.116:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.424155 4716 patch_prober.go:28] interesting pod/downloads-7954f5f757-8qjxz container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.29:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.424192 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8qjxz" podUID="31afed1d-4e1c-491e-b54b-a5e7e24077f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.29:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.569068 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.834773318s: [/var/lib/containers/storage/overlay/4bbb0d2955bdf6f4daa4aece4a8cb08431ed3ab2ebeb167ca0033277a187dc7b/diff /var/log/pods/openshift-cluster-version_cluster-version-operator-5c965bbfc6-k7f89_ea3457ac-3b63-4647-9ead-f31bdd2c7027/cluster-version-operator/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.584376 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.850475824s: [/var/lib/containers/storage/overlay/373dc43cc3913987dd4a7221ac28fb2c4d6f8ef4cd2a7d1132a404bb1e625baa/diff /var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-9d9ms_87589b07-5c3c-46c9-b84e-ffc2efa3b817/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.620429 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.256044241s: [/var/lib/containers/storage/overlay/e8d8fb5b54f06e7081400cf51b1d241c2210af94c46557f64fa9582e81181a2e/diff ]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.620892 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.261732703s: [/var/lib/containers/storage/overlay/7da7c86a0520256faffce35b07172b16faa4c71fb2160cb5672a037cc8045678/diff /var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-p6hrb_ee92f03c-d515-4aa3-ad3c-cce0c89fd12b/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.622432 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.263266466s: [/var/lib/containers/storage/overlay/5ff457b96fc5b92cf2cad81205e9deb1a5f21c97a5f986d4b34a36f8b6293fd9/diff /var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-z2l9s_33b32911-25d0-45d5-8009-4d9787875e86/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.624431 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.889768568s: 
[/var/lib/containers/storage/overlay/446c7b34c07dcf974d72f901cd4a1a3f8d953cc97774047dad4b4ab137dc20c1/diff /var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-649h2_d5727f03-aede-454c-8dec-17e10986da51/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.646826 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.287653028s: [/var/lib/containers/storage/overlay/4c6ca0fb5c9b701a263b74f07e8138c1b7e7f7fc9ba8cb9210d986d09a26d7f8/diff /var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-pmct6_f1029307-bcf9-40c0-b656-b7d203493022/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.650962 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.916475906s: [/var/lib/containers/storage/overlay/e5153b1f32e9a2bd8d6b64d0554d29c8b590364dc26d40e731865b07e32cab34/diff ]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.668539 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.933791988s: [/var/lib/containers/storage/overlay/ca15ff759d7893d9431bbb684a76c1679c82b91168b7ce5a63e2ba12bfb22858/diff /var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-5pz92_8da5796c-fddb-414b-8d26-6657d25e6c00/manager/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.679107 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.944391608s: [/var/lib/containers/storage/overlay/54289757c1dff11f259e0e663e3b6a7bf8754a11b2ff19bbb93fbe6e63d641eb/diff /var/log/pods/openstack-operators_telemetry-operator-controller-manager-5656d9bf69-llf2c_57b68343-4540-4097-9f68-a538c63bae3b/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.684513 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.950216003s: [/var/lib/containers/storage/overlay/aca0d57e04cbf92a721c6ebedf6e1ab5d71cb0ac8ffbec8e2c1bcec90d485f34/diff /var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-xzp8n_266cbf21-b738-4045-902b-88deadcc5869/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.685527 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.951628753s: [/var/lib/containers/storage/overlay/411cd086108437a433dbcfb2d98b78e8e1485eae81ff3d48d95d4fcfb31a7e25/diff /var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-kspms_ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.686420 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.322300921s: [/var/lib/containers/storage/overlay/908b32bfe065476fa12feffc2569d168d6b244806e86350672087301f71e6e4e/diff /var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-2ft2d_c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.688434 4716 
fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.953845126s: [/var/lib/containers/storage/overlay/e0467c1a6a544f467e9eb4e02c54d36ff14f881af0e6e4dfa6b7d74d0c5b2b33/diff /var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-hq8hn_695a0bab-c2c8-4c7d-9420-2dc191000e54/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.688520 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.298012452s: [/var/lib/containers/storage/overlay/0ad504b538e7f407c5658fe56f7b8db3f3f3440076a3cd4c50cf39423673a1ea/diff /var/log/pods/openshift-marketplace_redhat-operators-9z9z6_ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922/registry-server/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.694845 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.360233608s: [/var/lib/containers/storage/overlay/39e193e43e7eceff346041213836c9d5089e04b21275b4db60756a770fdd49b5/diff /var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-qgr4j_86831782-f8f0-40ad-99f7-4568185065b0/kube-rbac-proxy/0.log]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.696578 4716 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.962285025s: [/var/lib/containers/storage/overlay/22af4e8f313cdc54ca315fafb98b723a20221fc8293dd2966690b9fca8d279fa/diff ]; will not log again for this container unless duration exceeds 2s Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.699143 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" podUID="695a0bab-c2c8-4c7d-9420-2dc191000e54" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.699245 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-967d97867-hq8hn" podUID="695a0bab-c2c8-4c7d-9420-2dc191000e54" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.108:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.699286 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" podUID="33b32911-25d0-45d5-8009-4d9787875e86" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.699324 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-z2l9s" podUID="33b32911-25d0-45d5-8009-4d9787875e86" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.102:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.699388 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-p6hrb" 
podUID="ee92f03c-d515-4aa3-ad3c-cce0c89fd12b" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.103:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:10 crc kubenswrapper[4716]: I1209 15:30:10.700910 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-dzpdl" podUID="30c82b79-c03d-45d3-8b0a-ca506daf2934" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.110:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.903455 4716 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.57:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.903790 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="00f16fe7-88b3-4d0a-ba38-f68a4d340686" containerName="loki-ingester" probeResult="failure" output="Get \"https://10.217.0.57:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.928468 4716 patch_prober.go:28] interesting pod/logging-loki-query-frontend-84558f7c9f-zcnfl container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": context deadline exceeded" start-of-body= Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.928512 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" podUID="1c35fb07-6fe6-490e-b627-165a5500e574" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": context deadline exceeded" Dec 09 15:30:11 crc kubenswrapper[4716]: E1209 15:30:11.940796 4716 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="3.206s" Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.940866 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.959352 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f417726f-0022-42f5-bfe8-79f6605d557c","Type":"ContainerStarted","Data":"109d2ea91abfd32719018d9e4affafb6c5882e449d187fb5f6b8096e056779e5"} Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.959406 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" event={"ID":"3cb80e47-ed2e-48c3-8332-346daa870065","Type":"ContainerStarted","Data":"554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f"} Dec 09 15:30:11 crc kubenswrapper[4716]: I1209 15:30:11.979317 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" podStartSLOduration=11.979287483 podStartE2EDuration="11.979287483s" podCreationTimestamp="2025-12-09 15:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:30:10.623941995 +0000 UTC m=+1297.778685973" watchObservedRunningTime="2025-12-09 15:30:11.979287483 +0000 UTC m=+1299.134031471" Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.288227 4716 patch_prober.go:28] interesting pod/logging-loki-query-frontend-84558f7c9f-zcnfl container/loki-query-frontend namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": context deadline exceeded" start-of-body= Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.288842 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-zcnfl" podUID="1c35fb07-6fe6-490e-b627-165a5500e574" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.53:3101/loki/api/v1/status/buildinfo\": context deadline exceeded" Dec 09 15:30:14 crc kubenswrapper[4716]: E1209 15:30:14.299916 4716 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="2.34s" Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.299965 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.313361 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.313409 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b0be2722-84d6-4885-80bc-a795d7f2c05e","Type":"ContainerStarted","Data":"429ae242735a6ede6cf4e37dd2241887928ffaba88b94d708e8f90cdf41aefac"} Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.321841 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.398130 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.398347 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.398534 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 09 15:30:14 crc kubenswrapper[4716]: I1209 15:30:14.398754 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-bltwq" Dec 09 15:30:15 crc kubenswrapper[4716]: I1209 15:30:15.424679 4716 patch_prober.go:28] interesting pod/monitoring-plugin-588c6cf68c-pzb5w container/monitoring-plugin namespace/openshift-monitoring: Readiness probe status=failure output="Get \"https://10.217.0.79:9443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:15 crc kubenswrapper[4716]: I1209 15:30:15.425003 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-monitoring/monitoring-plugin-588c6cf68c-pzb5w" podUID="80f5f5d3-4060-4ef8-a164-522526390e5c" containerName="monitoring-plugin" probeResult="failure" output="Get \"https://10.217.0.79:9443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:15 crc kubenswrapper[4716]: I1209 15:30:15.425858 4716 patch_prober.go:28] interesting pod/logging-loki-gateway-58996586f7-7vqxj container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 09 15:30:15 crc kubenswrapper[4716]: I1209 15:30:15.425909 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" podUID="f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.55:8083/ready\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:15 crc kubenswrapper[4716]: I1209 15:30:15.427265 4716 patch_prober.go:28] interesting pod/logging-loki-gateway-58996586f7-7vqxj container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.55:8081/ready\": context deadline exceeded" start-of-body= Dec 09 15:30:15 crc kubenswrapper[4716]: I1209 15:30:15.427319 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-58996586f7-7vqxj" podUID="f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.55:8081/ready\": context deadline exceeded" Dec 09 15:30:16 crc kubenswrapper[4716]: E1209 15:30:16.015272 4716 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.702s" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.016084 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c32f00d5-f870-4dc6-8387-81bfe37b06f8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" 
Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.022729 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.022886 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.023087 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.023265 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c32f00d5-f870-4dc6-8387-81bfe37b06f8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.023373 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.023497 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tvk2\" (UniqueName: \"kubernetes.io/projected/c32f00d5-f870-4dc6-8387-81bfe37b06f8-kube-api-access-5tvk2\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.023644 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c32f00d5-f870-4dc6-8387-81bfe37b06f8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.134701 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c32f00d5-f870-4dc6-8387-81bfe37b06f8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.134903 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c32f00d5-f870-4dc6-8387-81bfe37b06f8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " 
pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.135017 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.135086 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.137057 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.137208 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c32f00d5-f870-4dc6-8387-81bfe37b06f8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.137308 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.137366 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tvk2\" (UniqueName: \"kubernetes.io/projected/c32f00d5-f870-4dc6-8387-81bfe37b06f8-kube-api-access-5tvk2\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.139412 4716 generic.go:334] "Generic (PLEG): container finished" podID="3cb80e47-ed2e-48c3-8332-346daa870065" containerID="554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f" exitCode=0 Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.139823 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.140594 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.136796 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/c32f00d5-f870-4dc6-8387-81bfe37b06f8-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.142488 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.142703 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c32f00d5-f870-4dc6-8387-81bfe37b06f8-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.145989 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c32f00d5-f870-4dc6-8387-81bfe37b06f8-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.147608 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c32f00d5-f870-4dc6-8387-81bfe37b06f8-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:16 crc kubenswrapper[4716]: I1209 15:30:16.177882 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tvk2\" (UniqueName: \"kubernetes.io/projected/c32f00d5-f870-4dc6-8387-81bfe37b06f8-kube-api-access-5tvk2\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:18 crc kubenswrapper[4716]: I1209 15:30:18.536157 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6" podUID="7b3f5ffb-3826-458b-9777-efd0b8cc747e" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:18 crc kubenswrapper[4716]: I1209 15:30:18.556306 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-operator-77b4579dbb-785g6" podUID="7b3f5ffb-3826-458b-9777-efd0b8cc747e" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.100:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:30:18 crc kubenswrapper[4716]: E1209 15:30:18.558699 4716 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="2.542s" Dec 09 15:30:18 crc kubenswrapper[4716]: I1209 15:30:18.558746 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" event={"ID":"3cb80e47-ed2e-48c3-8332-346daa870065","Type":"ContainerDied","Data":"554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f"} Dec 09 15:30:18 crc kubenswrapper[4716]: I1209 15:30:18.593550 4716 
prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-ktf2s" podUID="47c82d22-477b-4bd0-afd3-9ae2fed959f0" containerName="registry-server" probeResult="failure" output=< Dec 09 15:30:18 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:30:18 crc kubenswrapper[4716]: > Dec 09 15:30:18 crc kubenswrapper[4716]: I1209 15:30:18.634377 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-cell1-galera-0\" (UID: \"c32f00d5-f870-4dc6-8387-81bfe37b06f8\") " pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:18 crc kubenswrapper[4716]: E1209 15:30:18.795051 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cb80e47_ed2e_48c3_8332_346daa870065.slice/crio-conmon-554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:30:18 crc kubenswrapper[4716]: I1209 15:30:18.890143 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 09 15:30:18 crc kubenswrapper[4716]: E1209 15:30:18.921954 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cb80e47_ed2e_48c3_8332_346daa870065.slice/crio-conmon-554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cb80e47_ed2e_48c3_8332_346daa870065.slice/crio-554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.072455 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.073929 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.087553 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.093075 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.093967 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qdwp7" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.120804 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.139755 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/67d51dc9-9087-4d11-9f1f-0947af797f5d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.139852 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67d51dc9-9087-4d11-9f1f-0947af797f5d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.139886 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/67d51dc9-9087-4d11-9f1f-0947af797f5d-kolla-config\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.139916 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67d51dc9-9087-4d11-9f1f-0947af797f5d-config-data\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.140056 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-599sz\" (UniqueName: \"kubernetes.io/projected/67d51dc9-9087-4d11-9f1f-0947af797f5d-kube-api-access-599sz\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.255360 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/67d51dc9-9087-4d11-9f1f-0947af797f5d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.255472 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67d51dc9-9087-4d11-9f1f-0947af797f5d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.255511 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/67d51dc9-9087-4d11-9f1f-0947af797f5d-kolla-config\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.255538 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67d51dc9-9087-4d11-9f1f-0947af797f5d-config-data\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.255660 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-599sz\" (UniqueName: \"kubernetes.io/projected/67d51dc9-9087-4d11-9f1f-0947af797f5d-kube-api-access-599sz\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.265176 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/67d51dc9-9087-4d11-9f1f-0947af797f5d-kolla-config\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.265741 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/67d51dc9-9087-4d11-9f1f-0947af797f5d-config-data\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.284946 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-599sz\" (UniqueName: \"kubernetes.io/projected/67d51dc9-9087-4d11-9f1f-0947af797f5d-kube-api-access-599sz\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.300073 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/67d51dc9-9087-4d11-9f1f-0947af797f5d-memcached-tls-certs\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.311216 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67d51dc9-9087-4d11-9f1f-0947af797f5d-combined-ca-bundle\") pod \"memcached-0\" (UID: \"67d51dc9-9087-4d11-9f1f-0947af797f5d\") " pod="openstack/memcached-0" Dec 09 15:30:19 crc kubenswrapper[4716]: I1209 15:30:19.458089 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.208430 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.322409 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.561940 4716 util.go:48] "No ready sandbox for pod can be found. 
Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.744017 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3cb80e47-ed2e-48c3-8332-346daa870065-secret-volume\") pod \"3cb80e47-ed2e-48c3-8332-346daa870065\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") "
Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.744235 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3cb80e47-ed2e-48c3-8332-346daa870065-config-volume\") pod \"3cb80e47-ed2e-48c3-8332-346daa870065\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") "
Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.744287 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8pw9\" (UniqueName: \"kubernetes.io/projected/3cb80e47-ed2e-48c3-8332-346daa870065-kube-api-access-g8pw9\") pod \"3cb80e47-ed2e-48c3-8332-346daa870065\" (UID: \"3cb80e47-ed2e-48c3-8332-346daa870065\") "
Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.757692 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb80e47-ed2e-48c3-8332-346daa870065-config-volume" (OuterVolumeSpecName: "config-volume") pod "3cb80e47-ed2e-48c3-8332-346daa870065" (UID: "3cb80e47-ed2e-48c3-8332-346daa870065"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.765675 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb80e47-ed2e-48c3-8332-346daa870065-kube-api-access-g8pw9" (OuterVolumeSpecName: "kube-api-access-g8pw9") pod "3cb80e47-ed2e-48c3-8332-346daa870065" (UID: "3cb80e47-ed2e-48c3-8332-346daa870065"). InnerVolumeSpecName "kube-api-access-g8pw9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.803279 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cb80e47-ed2e-48c3-8332-346daa870065-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3cb80e47-ed2e-48c3-8332-346daa870065" (UID: "3cb80e47-ed2e-48c3-8332-346daa870065"). InnerVolumeSpecName "secret-volume".
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.848913 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3cb80e47-ed2e-48c3-8332-346daa870065-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.848954 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3cb80e47-ed2e-48c3-8332-346daa870065-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.848984 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8pw9\" (UniqueName: \"kubernetes.io/projected/3cb80e47-ed2e-48c3-8332-346daa870065-kube-api-access-g8pw9\") on node \"crc\" DevicePath \"\"" Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.912974 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c32f00d5-f870-4dc6-8387-81bfe37b06f8","Type":"ContainerStarted","Data":"2d75d99a3636cd1171b45f7dface65a1b5d56d6b07158e957528a6082bcfc97b"} Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.936133 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" event={"ID":"3cb80e47-ed2e-48c3-8332-346daa870065","Type":"ContainerDied","Data":"e5d48bc9e6ca746999a5a8d822d37f25bd16bc5d0cd204e66d3502a97d0ef716"} Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.936181 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5d48bc9e6ca746999a5a8d822d37f25bd16bc5d0cd204e66d3502a97d0ef716" Dec 09 15:30:20 crc kubenswrapper[4716]: I1209 15:30:20.936222 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt" Dec 09 15:30:21 crc kubenswrapper[4716]: I1209 15:30:21.194411 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 09 15:30:21 crc kubenswrapper[4716]: I1209 15:30:21.996946 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"67d51dc9-9087-4d11-9f1f-0947af797f5d","Type":"ContainerStarted","Data":"ed49c3929541e967aa141fa78e204fede0e84a508105cecb0542423aa81b55bf"} Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.483697 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:30:23 crc kubenswrapper[4716]: E1209 15:30:23.484482 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cb80e47-ed2e-48c3-8332-346daa870065" containerName="collect-profiles" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.484496 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cb80e47-ed2e-48c3-8332-346daa870065" containerName="collect-profiles" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.484758 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cb80e47-ed2e-48c3-8332-346daa870065" containerName="collect-profiles" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.485539 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.497513 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-jm2b4" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.682432 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.683891 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf69k\" (UniqueName: \"kubernetes.io/projected/8cd62374-51f5-4445-94f2-3d575475d8e6-kube-api-access-wf69k\") pod \"kube-state-metrics-0\" (UID: \"8cd62374-51f5-4445-94f2-3d575475d8e6\") " pod="openstack/kube-state-metrics-0" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.786538 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf69k\" (UniqueName: \"kubernetes.io/projected/8cd62374-51f5-4445-94f2-3d575475d8e6-kube-api-access-wf69k\") pod \"kube-state-metrics-0\" (UID: \"8cd62374-51f5-4445-94f2-3d575475d8e6\") " pod="openstack/kube-state-metrics-0" Dec 09 15:30:23 crc kubenswrapper[4716]: I1209 15:30:23.849226 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf69k\" (UniqueName: \"kubernetes.io/projected/8cd62374-51f5-4445-94f2-3d575475d8e6-kube-api-access-wf69k\") pod \"kube-state-metrics-0\" (UID: \"8cd62374-51f5-4445-94f2-3d575475d8e6\") " pod="openstack/kube-state-metrics-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.571285 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.836549 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf"] Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.837886 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.840568 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-lsx6s" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.840785 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.847444 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.851047 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.855818 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.856131 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.857260 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.857337 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.857701 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-d2rpr" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.858022 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.869640 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf"] Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.903441 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.974988 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ea0c2c0-b980-461f-9f94-38eb8630a830-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" (UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975075 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/be971cf7-04bf-4487-a95a-64bb6ea739a7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975131 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975216 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975235 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqrg7\" (UniqueName: \"kubernetes.io/projected/0ea0c2c0-b980-461f-9f94-38eb8630a830-kube-api-access-rqrg7\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" 
(UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975269 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975316 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975389 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-config\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975423 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fhcb\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-kube-api-access-6fhcb\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:24 crc kubenswrapper[4716]: I1209 15:30:24.975448 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/be971cf7-04bf-4487-a95a-64bb6ea739a7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077481 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077567 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-config\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077608 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fhcb\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-kube-api-access-6fhcb\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077661 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: 
\"kubernetes.io/empty-dir/be971cf7-04bf-4487-a95a-64bb6ea739a7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077736 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ea0c2c0-b980-461f-9f94-38eb8630a830-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" (UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077805 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/be971cf7-04bf-4487-a95a-64bb6ea739a7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077851 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077946 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.077978 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqrg7\" (UniqueName: \"kubernetes.io/projected/0ea0c2c0-b980-461f-9f94-38eb8630a830-kube-api-access-rqrg7\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" (UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.078004 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: E1209 15:30:25.079186 4716 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Dec 09 15:30:25 crc kubenswrapper[4716]: E1209 15:30:25.079268 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0ea0c2c0-b980-461f-9f94-38eb8630a830-serving-cert podName:0ea0c2c0-b980-461f-9f94-38eb8630a830 nodeName:}" failed. No retries permitted until 2025-12-09 15:30:25.579247259 +0000 UTC m=+1312.733991247 (durationBeforeRetry 500ms). 
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.079783 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.080512 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/be971cf7-04bf-4487-a95a-64bb6ea739a7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.088644 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.094365 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/be971cf7-04bf-4487-a95a-64bb6ea739a7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.097086 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.116672 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.117193 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-config\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.134391 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fhcb\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-kube-api-access-6fhcb\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0"
Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.179717 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"kube-api-access-rqrg7\" (UniqueName: \"kubernetes.io/projected/0ea0c2c0-b980-461f-9f94-38eb8630a830-kube-api-access-rqrg7\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" (UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.302004 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.536207 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6577b99b65-b6rff"] Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.543768 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.596919 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6577b99b65-b6rff"] Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.611600 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642067 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-trusted-ca-bundle\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642099 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-serving-cert\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642145 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ea0c2c0-b980-461f-9f94-38eb8630a830-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" (UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642204 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2kjz\" (UniqueName: \"kubernetes.io/projected/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-kube-api-access-p2kjz\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642223 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-oauth-config\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642241 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-oauth-serving-cert\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642285 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-service-ca\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.642337 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-config\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.663209 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0ea0c2c0-b980-461f-9f94-38eb8630a830-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-226pf\" (UID: \"0ea0c2c0-b980-461f-9f94-38eb8630a830\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.750454 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-config\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.750569 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-trusted-ca-bundle\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.750594 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-serving-cert\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.750689 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2kjz\" (UniqueName: \"kubernetes.io/projected/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-kube-api-access-p2kjz\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.750720 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-oauth-config\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc 
kubenswrapper[4716]: I1209 15:30:25.750743 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-oauth-serving-cert\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.750795 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-service-ca\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.751688 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-service-ca\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.755799 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-config\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.756318 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-oauth-serving-cert\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.757553 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-trusted-ca-bundle\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.767258 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-oauth-config\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.767440 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-console-serving-cert\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.785804 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2kjz\" (UniqueName: \"kubernetes.io/projected/4b65440d-3002-4cdb-b3ff-6504e5e15bdb-kube-api-access-p2kjz\") pod \"console-6577b99b65-b6rff\" (UID: \"4b65440d-3002-4cdb-b3ff-6504e5e15bdb\") " pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.872145 4716 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.908212 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8mm7h"] Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.909870 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.925231 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8mm7h"] Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.925555 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-tjlfs" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.927765 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.928082 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 09 15:30:25 crc kubenswrapper[4716]: I1209 15:30:25.928945 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.043010 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-pf2c7"] Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.045871 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.053974 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-pf2c7"] Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.063406 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890cdf81-6c51-4954-a7fc-ea6116941cfe-combined-ca-bundle\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.063653 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/890cdf81-6c51-4954-a7fc-ea6116941cfe-scripts\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.063837 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-log-ovn\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.063964 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqfbq\" (UniqueName: \"kubernetes.io/projected/890cdf81-6c51-4954-a7fc-ea6116941cfe-kube-api-access-mqfbq\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.064046 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/890cdf81-6c51-4954-a7fc-ea6116941cfe-ovn-controller-tls-certs\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.064148 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-run\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.064256 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-run-ovn\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176053 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-run\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176125 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-log\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176151 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-run-ovn\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176175 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f4610df-19db-432c-805c-2e8b52e5b344-scripts\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176197 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890cdf81-6c51-4954-a7fc-ea6116941cfe-combined-ca-bundle\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176221 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/890cdf81-6c51-4954-a7fc-ea6116941cfe-scripts\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176266 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-etc-ovs\") pod \"ovn-controller-ovs-pf2c7\" (UID: 
\"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176284 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-lib\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176310 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rpdv\" (UniqueName: \"kubernetes.io/projected/6f4610df-19db-432c-805c-2e8b52e5b344-kube-api-access-5rpdv\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176348 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-log-ovn\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176408 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqfbq\" (UniqueName: \"kubernetes.io/projected/890cdf81-6c51-4954-a7fc-ea6116941cfe-kube-api-access-mqfbq\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176440 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/890cdf81-6c51-4954-a7fc-ea6116941cfe-ovn-controller-tls-certs\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.176464 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-run\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.177042 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-run\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.177126 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-run-ovn\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.180714 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/890cdf81-6c51-4954-a7fc-ea6116941cfe-var-log-ovn\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.192091 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/890cdf81-6c51-4954-a7fc-ea6116941cfe-scripts\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.205954 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/890cdf81-6c51-4954-a7fc-ea6116941cfe-ovn-controller-tls-certs\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.207233 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqfbq\" (UniqueName: \"kubernetes.io/projected/890cdf81-6c51-4954-a7fc-ea6116941cfe-kube-api-access-mqfbq\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.209200 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890cdf81-6c51-4954-a7fc-ea6116941cfe-combined-ca-bundle\") pod \"ovn-controller-8mm7h\" (UID: \"890cdf81-6c51-4954-a7fc-ea6116941cfe\") " pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.239652 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.277818 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-log\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.277877 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f4610df-19db-432c-805c-2e8b52e5b344-scripts\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.277951 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-etc-ovs\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.277968 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-lib\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.277995 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpdv\" (UniqueName: \"kubernetes.io/projected/6f4610df-19db-432c-805c-2e8b52e5b344-kube-api-access-5rpdv\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.278079 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-run\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.279387 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-log\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.280284 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-etc-ovs\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.280339 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-run\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.280403 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/6f4610df-19db-432c-805c-2e8b52e5b344-var-lib\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.283101 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f4610df-19db-432c-805c-2e8b52e5b344-scripts\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.301200 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rpdv\" (UniqueName: \"kubernetes.io/projected/6f4610df-19db-432c-805c-2e8b52e5b344-kube-api-access-5rpdv\") pod \"ovn-controller-ovs-pf2c7\" (UID: \"6f4610df-19db-432c-805c-2e8b52e5b344\") " pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.320372 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8mm7h" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.415304 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.650799 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.652569 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.669111 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-g7wcx" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.669369 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.669833 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.670021 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.670252 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700478 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/fa255b9e-51c8-407a-aebe-606da43b9906-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700646 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa255b9e-51c8-407a-aebe-606da43b9906-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700696 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700726 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700779 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700800 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa255b9e-51c8-407a-aebe-606da43b9906-config\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700849 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqm7k\" (UniqueName: \"kubernetes.io/projected/fa255b9e-51c8-407a-aebe-606da43b9906-kube-api-access-dqm7k\") pod \"ovsdbserver-nb-0\" (UID: 
\"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.700950 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.714271 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.773224 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8cd62374-51f5-4445-94f2-3d575475d8e6","Type":"ContainerStarted","Data":"29ab2f1b0d1be8769b6cf008d1aa83819b5f56bed4af0977b2eda911013a69c2"} Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812133 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa255b9e-51c8-407a-aebe-606da43b9906-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812213 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812517 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812581 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812597 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa255b9e-51c8-407a-aebe-606da43b9906-config\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812672 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqm7k\" (UniqueName: \"kubernetes.io/projected/fa255b9e-51c8-407a-aebe-606da43b9906-kube-api-access-dqm7k\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812848 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.812926 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/fa255b9e-51c8-407a-aebe-606da43b9906-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.814335 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/fa255b9e-51c8-407a-aebe-606da43b9906-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.814663 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.816360 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa255b9e-51c8-407a-aebe-606da43b9906-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.834020 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.835047 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa255b9e-51c8-407a-aebe-606da43b9906-config\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.837788 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.863709 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.869738 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa255b9e-51c8-407a-aebe-606da43b9906-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.888382 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqm7k\" (UniqueName: \"kubernetes.io/projected/fa255b9e-51c8-407a-aebe-606da43b9906-kube-api-access-dqm7k\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:26 crc kubenswrapper[4716]: I1209 15:30:26.923830 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"fa255b9e-51c8-407a-aebe-606da43b9906\") " pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:27 crc kubenswrapper[4716]: I1209 15:30:27.040007 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 09 15:30:27 crc kubenswrapper[4716]: I1209 15:30:27.414691 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf"] Dec 09 15:30:27 crc kubenswrapper[4716]: I1209 15:30:27.809035 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6577b99b65-b6rff"] Dec 09 15:30:27 crc kubenswrapper[4716]: I1209 15:30:27.830470 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" event={"ID":"0ea0c2c0-b980-461f-9f94-38eb8630a830","Type":"ContainerStarted","Data":"81b07b9126ce72d1f68976e9edd8511081d70cb823d95ce9ca79c8b0cf69114e"} Dec 09 15:30:27 crc kubenswrapper[4716]: W1209 15:30:27.849232 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b65440d_3002_4cdb_b3ff_6504e5e15bdb.slice/crio-c538b374c187b5c6d9b9fd064e8daa55f84955ddc7462770359c5bafdcef317a WatchSource:0}: Error finding container c538b374c187b5c6d9b9fd064e8daa55f84955ddc7462770359c5bafdcef317a: Status 404 returned error can't find the container with id c538b374c187b5c6d9b9fd064e8daa55f84955ddc7462770359c5bafdcef317a Dec 09 15:30:27 crc kubenswrapper[4716]: I1209 15:30:27.850340 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerStarted","Data":"06fc611ec5adb9a1f5d6cd266b417bef2da5cba51aed5cd3b1cf260c843ce958"} Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.148058 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8mm7h"] Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.224971 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.248785 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.248915 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.255576 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-g2tn8" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.255857 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.255980 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.256126 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372045 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372172 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372207 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372233 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372290 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372367 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-config\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372422 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mctzs\" (UniqueName: \"kubernetes.io/projected/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-kube-api-access-mctzs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.372488 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.473874 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.473950 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.473991 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.474024 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.474092 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.474165 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-config\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.474212 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mctzs\" (UniqueName: \"kubernetes.io/projected/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-kube-api-access-mctzs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.474273 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.477024 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-config\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " 
pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.477352 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.477656 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.485303 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.504189 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mctzs\" (UniqueName: \"kubernetes.io/projected/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-kube-api-access-mctzs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.514648 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.514805 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.519045 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.535197 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/07a42bcd-0edf-4215-a2ce-e5b66b8c09d7-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7\") " pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.638691 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.781290 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-pf2c7"] Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.951026 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8mm7h" event={"ID":"890cdf81-6c51-4954-a7fc-ea6116941cfe","Type":"ContainerStarted","Data":"bdca3a7d2085cd1edb0efa6738d244d818874fee89270f7ffa7cf0a02f15e855"} Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.982893 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6577b99b65-b6rff" event={"ID":"4b65440d-3002-4cdb-b3ff-6504e5e15bdb","Type":"ContainerStarted","Data":"e251cb3c87511fa61e5f5dcf313711db868c93ceef0b02199db4dbd37db2ef44"} Dec 09 15:30:28 crc kubenswrapper[4716]: I1209 15:30:28.983293 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6577b99b65-b6rff" event={"ID":"4b65440d-3002-4cdb-b3ff-6504e5e15bdb","Type":"ContainerStarted","Data":"c538b374c187b5c6d9b9fd064e8daa55f84955ddc7462770359c5bafdcef317a"} Dec 09 15:30:29 crc kubenswrapper[4716]: I1209 15:30:29.047516 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6577b99b65-b6rff" podStartSLOduration=4.047477208 podStartE2EDuration="4.047477208s" podCreationTimestamp="2025-12-09 15:30:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:30:29.020221303 +0000 UTC m=+1316.174965311" watchObservedRunningTime="2025-12-09 15:30:29.047477208 +0000 UTC m=+1316.202221196" Dec 09 15:30:29 crc kubenswrapper[4716]: I1209 15:30:29.267788 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 09 15:30:32 crc kubenswrapper[4716]: W1209 15:30:32.759756 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f4610df_19db_432c_805c_2e8b52e5b344.slice/crio-e8c3af8b03d1a491d8b204a53ba8cf13331ef3c6939ac5be5449bbc818c0cbe3 WatchSource:0}: Error finding container e8c3af8b03d1a491d8b204a53ba8cf13331ef3c6939ac5be5449bbc818c0cbe3: Status 404 returned error can't find the container with id e8c3af8b03d1a491d8b204a53ba8cf13331ef3c6939ac5be5449bbc818c0cbe3 Dec 09 15:30:32 crc kubenswrapper[4716]: W1209 15:30:32.760720 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa255b9e_51c8_407a_aebe_606da43b9906.slice/crio-a3d0653a85eea663005759b0368373d2b9dbe7d567bb9a2ec4f9f837b4bb9bac WatchSource:0}: Error finding container a3d0653a85eea663005759b0368373d2b9dbe7d567bb9a2ec4f9f837b4bb9bac: Status 404 returned error can't find the container with id a3d0653a85eea663005759b0368373d2b9dbe7d567bb9a2ec4f9f837b4bb9bac Dec 09 15:30:33 crc kubenswrapper[4716]: I1209 15:30:33.072341 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pf2c7" event={"ID":"6f4610df-19db-432c-805c-2e8b52e5b344","Type":"ContainerStarted","Data":"e8c3af8b03d1a491d8b204a53ba8cf13331ef3c6939ac5be5449bbc818c0cbe3"} Dec 09 15:30:33 crc kubenswrapper[4716]: I1209 15:30:33.076163 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"fa255b9e-51c8-407a-aebe-606da43b9906","Type":"ContainerStarted","Data":"a3d0653a85eea663005759b0368373d2b9dbe7d567bb9a2ec4f9f837b4bb9bac"} Dec 09 15:30:35 crc kubenswrapper[4716]: I1209 15:30:35.930108 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:35 crc kubenswrapper[4716]: I1209 15:30:35.930752 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:35 crc kubenswrapper[4716]: I1209 15:30:35.942022 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:36 crc kubenswrapper[4716]: I1209 15:30:36.117722 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6577b99b65-b6rff" Dec 09 15:30:36 crc kubenswrapper[4716]: I1209 15:30:36.201497 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f76b57f8b-ltlwj"] Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.739720 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-g6rkt"] Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.741821 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.745170 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-g6rkt"] Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.746801 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.896028 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fa68337-088e-40a6-8cc4-3b2b0947f959-config\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.896085 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqttb\" (UniqueName: \"kubernetes.io/projected/1fa68337-088e-40a6-8cc4-3b2b0947f959-kube-api-access-pqttb\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.896164 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa68337-088e-40a6-8cc4-3b2b0947f959-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.896251 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa68337-088e-40a6-8cc4-3b2b0947f959-combined-ca-bundle\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.896285 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1fa68337-088e-40a6-8cc4-3b2b0947f959-ovn-rundir\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.896313 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1fa68337-088e-40a6-8cc4-3b2b0947f959-ovs-rundir\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.902169 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-h54jn"] Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.930916 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-l6zl8"] Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.944577 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-l6zl8"] Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.944727 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.947306 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.998184 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fa68337-088e-40a6-8cc4-3b2b0947f959-config\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.998250 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqttb\" (UniqueName: \"kubernetes.io/projected/1fa68337-088e-40a6-8cc4-3b2b0947f959-kube-api-access-pqttb\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.998353 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa68337-088e-40a6-8cc4-3b2b0947f959-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.998515 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa68337-088e-40a6-8cc4-3b2b0947f959-combined-ca-bundle\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.998754 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1fa68337-088e-40a6-8cc4-3b2b0947f959-ovn-rundir\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.998814 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1fa68337-088e-40a6-8cc4-3b2b0947f959-ovs-rundir\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.999136 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/1fa68337-088e-40a6-8cc4-3b2b0947f959-ovs-rundir\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.999206 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/1fa68337-088e-40a6-8cc4-3b2b0947f959-ovn-rundir\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:37 crc kubenswrapper[4716]: I1209 15:30:37.999239 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fa68337-088e-40a6-8cc4-3b2b0947f959-config\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.004566 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fa68337-088e-40a6-8cc4-3b2b0947f959-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.004750 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fa68337-088e-40a6-8cc4-3b2b0947f959-combined-ca-bundle\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.038985 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqttb\" (UniqueName: \"kubernetes.io/projected/1fa68337-088e-40a6-8cc4-3b2b0947f959-kube-api-access-pqttb\") pod \"ovn-controller-metrics-g6rkt\" (UID: \"1fa68337-088e-40a6-8cc4-3b2b0947f959\") " pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.073347 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-g6rkt" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.102306 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.102369 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-config\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.102406 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nswhz\" (UniqueName: \"kubernetes.io/projected/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-kube-api-access-nswhz\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.102474 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.204246 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.204297 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-config\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.204329 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nswhz\" (UniqueName: \"kubernetes.io/projected/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-kube-api-access-nswhz\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.204377 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.205417 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: 
\"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.205665 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.205744 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-config\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.223993 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nswhz\" (UniqueName: \"kubernetes.io/projected/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-kube-api-access-nswhz\") pod \"dnsmasq-dns-7fd796d7df-l6zl8\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:38 crc kubenswrapper[4716]: I1209 15:30:38.274341 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:30:52 crc kubenswrapper[4716]: E1209 15:30:52.516916 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 09 15:30:52 crc kubenswrapper[4716]: E1209 15:30:52.517745 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
Dec 09 15:30:52 crc kubenswrapper[4716]: E1209 15:30:52.518980 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="b0be2722-84d6-4885-80bc-a795d7f2c05e"
Dec 09 15:30:52 crc kubenswrapper[4716]: E1209 15:30:52.530597 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified"
Dec 09 15:30:52 crc kubenswrapper[4716]: E1209 15:30:52.530786 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5tvk2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(c32f00d5-f870-4dc6-8387-81bfe37b06f8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:52 crc kubenswrapper[4716]: E1209 15:30:52.531998 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="c32f00d5-f870-4dc6-8387-81bfe37b06f8"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.261179 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="b0be2722-84d6-4885-80bc-a795d7f2c05e"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.261329 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="c32f00d5-f870-4dc6-8387-81bfe37b06f8"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.493968 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.494188 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-25bbc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(2e140762-44f7-46f9-9bbe-a8f780186869): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.495404 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="2e140762-44f7-46f9-9bbe-a8f780186869"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.564376 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.564588 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7p9l2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(f417726f-0022-42f5-bfe8-79f6605d557c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:53 crc kubenswrapper[4716]: E1209 15:30:53.565860 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="f417726f-0022-42f5-bfe8-79f6605d557c"
Dec 09 15:30:54 crc kubenswrapper[4716]: E1209 15:30:54.271710 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="f417726f-0022-42f5-bfe8-79f6605d557c"
pod="openstack/rabbitmq-cell1-server-0" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" Dec 09 15:30:54 crc kubenswrapper[4716]: E1209 15:30:54.272340 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" Dec 09 15:30:57 crc kubenswrapper[4716]: E1209 15:30:57.781748 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Dec 09 15:30:57 crc kubenswrapper[4716]: E1209 15:30:57.782417 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n647hd4h56hf9h558h559h8h544hdfh65ch564h578h689h9bh55ch54dh6fh67bhdch567hbch654h5c6h57bh694h5c7hc9h79h96h57bh98h656q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mqfbq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,Termination
GracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-8mm7h_openstack(890cdf81-6c51-4954-a7fc-ea6116941cfe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:57 crc kubenswrapper[4716]: E1209 15:30:57.783963 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-8mm7h" podUID="890cdf81-6c51-4954-a7fc-ea6116941cfe"
Dec 09 15:30:58 crc kubenswrapper[4716]: E1209 15:30:58.183958 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified"
Dec 09 15:30:58 crc kubenswrapper[4716]: E1209 15:30:58.184458 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n6ch575h555h565hf7h87h98h5f5h64bh68fh668h55ch64bh5d5h9h5bdh65bh689h584h54bh5fh698hdh59bh658hf5h54bhd6hf6h679h66fh674q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dqm7k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(fa255b9e-51c8-407a-aebe-606da43b9906): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:58 crc kubenswrapper[4716]: E1209 15:30:58.309224 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-8mm7h" podUID="890cdf81-6c51-4954-a7fc-ea6116941cfe"
Dec 09 15:30:58 crc kubenswrapper[4716]: I1209 15:30:58.333246 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Dec 09 15:30:58 crc kubenswrapper[4716]: E1209 15:30:58.990912 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Dec 09 15:30:58 crc kubenswrapper[4716]: E1209 15:30:58.991500 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vh6lb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-784v8_openstack(5e40d00b-b8ee-4cf9-859a-5e92c49182c6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:58 crc kubenswrapper[4716]: E1209 15:30:58.992693 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" podUID="5e40d00b-b8ee-4cf9-859a-5e92c49182c6"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.003381 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.003546 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qmgrp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-p98qb_openstack(72871dec-a60a-4e54-86a0-be8f91c3fde3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.004879 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" podUID="72871dec-a60a-4e54-86a0-be8f91c3fde3"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.017370 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.017560 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gtz9x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-rhfr8_openstack(21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.019140 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" podUID="21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.025505 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.025820 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v4jqq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-h54jn_openstack(d166dd61-0ac4-4e96-88c6-59ac154db496): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.027069 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-h54jn" podUID="d166dd61-0ac4-4e96-88c6-59ac154db496"
Dec 09 15:30:59 crc kubenswrapper[4716]: I1209 15:30:59.316275 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7","Type":"ContainerStarted","Data":"744dbeb2c79737a15305981c93707fc1626c4b75d7420bac962205caac2e57bb"}
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.318101 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" podUID="5e40d00b-b8ee-4cf9-859a-5e92c49182c6"
Dec 09 15:30:59 crc kubenswrapper[4716]: I1209 15:30:59.542237 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-g6rkt"]
Dec 09 15:30:59 crc kubenswrapper[4716]: I1209 15:30:59.589915 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-l6zl8"]
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.964931 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.965003 4716 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.965189 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wf69k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(8cd62374-51f5-4445-94f2-3d575475d8e6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 09 15:30:59 crc kubenswrapper[4716]: E1209 15:30:59.966500 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6"
Dec 09 15:30:59 crc kubenswrapper[4716]: W1209 15:30:59.968484 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fa68337_088e_40a6_8cc4_3b2b0947f959.slice/crio-93f63c570a63fdc3a0743a67de61fcf193d30591da15b910cd54b57518c98a55 WatchSource:0}: Error finding container 93f63c570a63fdc3a0743a67de61fcf193d30591da15b910cd54b57518c98a55: Status 404 returned error can't find the container with id 93f63c570a63fdc3a0743a67de61fcf193d30591da15b910cd54b57518c98a55
Dec 09 15:30:59 crc kubenswrapper[4716]: W1209 15:30:59.992190 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31f45c75_847f_4adc_8bdf_3438bf5f9a4c.slice/crio-ef645e20eee874884d4a2dd91862c778f7ba28b1c1f979c252b63b8d5284f37e WatchSource:0}: Error finding container ef645e20eee874884d4a2dd91862c778f7ba28b1c1f979c252b63b8d5284f37e: Status 404 returned error can't find the container with id ef645e20eee874884d4a2dd91862c778f7ba28b1c1f979c252b63b8d5284f37e
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.267205 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.326718 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-h54jn" event={"ID":"d166dd61-0ac4-4e96-88c6-59ac154db496","Type":"ContainerDied","Data":"5170dcf69e7061f2c122ef73bb6d5f7c56addb36e426e8532d5d7091c64aab24"}
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.326782 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5170dcf69e7061f2c122ef73bb6d5f7c56addb36e426e8532d5d7091c64aab24"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.328111 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb" event={"ID":"72871dec-a60a-4e54-86a0-be8f91c3fde3","Type":"ContainerDied","Data":"23e5539cdddcae1c1046e5bbca2db657f5586e731f10ef44614446b39689008d"}
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.328157 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23e5539cdddcae1c1046e5bbca2db657f5586e731f10ef44614446b39689008d"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.330042 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" event={"ID":"31f45c75-847f-4adc-8bdf-3438bf5f9a4c","Type":"ContainerStarted","Data":"ef645e20eee874884d4a2dd91862c778f7ba28b1c1f979c252b63b8d5284f37e"}
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.331038 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8" event={"ID":"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d","Type":"ContainerDied","Data":"6300caae40fb38bcb4696a7e5f12a69bca2aff8151bb1e78355498dd208f0ee5"}
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.331179 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rhfr8"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.334374 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-g6rkt" event={"ID":"1fa68337-088e-40a6-8cc4-3b2b0947f959","Type":"ContainerStarted","Data":"93f63c570a63fdc3a0743a67de61fcf193d30591da15b910cd54b57518c98a55"}
Dec 09 15:31:00 crc kubenswrapper[4716]: E1209 15:31:00.335753 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.368708 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtz9x\" (UniqueName: \"kubernetes.io/projected/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-kube-api-access-gtz9x\") pod \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.369046 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-config\") pod \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\" (UID: \"21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.369451 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-config" (OuterVolumeSpecName: "config") pod "21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d" (UID: "21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.370650 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.373729 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-kube-api-access-gtz9x" (OuterVolumeSpecName: "kube-api-access-gtz9x") pod "21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d" (UID: "21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d"). InnerVolumeSpecName "kube-api-access-gtz9x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.443277 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-h54jn"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.473025 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtz9x\" (UniqueName: \"kubernetes.io/projected/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d-kube-api-access-gtz9x\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.492292 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb"
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.573816 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-dns-svc\") pod \"72871dec-a60a-4e54-86a0-be8f91c3fde3\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.573865 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-config\") pod \"72871dec-a60a-4e54-86a0-be8f91c3fde3\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.573940 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4jqq\" (UniqueName: \"kubernetes.io/projected/d166dd61-0ac4-4e96-88c6-59ac154db496-kube-api-access-v4jqq\") pod \"d166dd61-0ac4-4e96-88c6-59ac154db496\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.574154 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-config\") pod \"d166dd61-0ac4-4e96-88c6-59ac154db496\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.574333 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmgrp\" (UniqueName: \"kubernetes.io/projected/72871dec-a60a-4e54-86a0-be8f91c3fde3-kube-api-access-qmgrp\") pod \"72871dec-a60a-4e54-86a0-be8f91c3fde3\" (UID: \"72871dec-a60a-4e54-86a0-be8f91c3fde3\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.574395 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "72871dec-a60a-4e54-86a0-be8f91c3fde3" (UID: "72871dec-a60a-4e54-86a0-be8f91c3fde3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.574473 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-config" (OuterVolumeSpecName: "config") pod "72871dec-a60a-4e54-86a0-be8f91c3fde3" (UID: "72871dec-a60a-4e54-86a0-be8f91c3fde3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.574581 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-config" (OuterVolumeSpecName: "config") pod "d166dd61-0ac4-4e96-88c6-59ac154db496" (UID: "d166dd61-0ac4-4e96-88c6-59ac154db496"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.575123 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-dns-svc\") pod \"d166dd61-0ac4-4e96-88c6-59ac154db496\" (UID: \"d166dd61-0ac4-4e96-88c6-59ac154db496\") "
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.575788 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d166dd61-0ac4-4e96-88c6-59ac154db496" (UID: "d166dd61-0ac4-4e96-88c6-59ac154db496"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.576326 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.576354 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.576366 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72871dec-a60a-4e54-86a0-be8f91c3fde3-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.576378 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d166dd61-0ac4-4e96-88c6-59ac154db496-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.579213 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d166dd61-0ac4-4e96-88c6-59ac154db496-kube-api-access-v4jqq" (OuterVolumeSpecName: "kube-api-access-v4jqq") pod "d166dd61-0ac4-4e96-88c6-59ac154db496" (UID: "d166dd61-0ac4-4e96-88c6-59ac154db496"). InnerVolumeSpecName "kube-api-access-v4jqq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.591583 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72871dec-a60a-4e54-86a0-be8f91c3fde3-kube-api-access-qmgrp" (OuterVolumeSpecName: "kube-api-access-qmgrp") pod "72871dec-a60a-4e54-86a0-be8f91c3fde3" (UID: "72871dec-a60a-4e54-86a0-be8f91c3fde3"). InnerVolumeSpecName "kube-api-access-qmgrp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.678572 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmgrp\" (UniqueName: \"kubernetes.io/projected/72871dec-a60a-4e54-86a0-be8f91c3fde3-kube-api-access-qmgrp\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.678631 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4jqq\" (UniqueName: \"kubernetes.io/projected/d166dd61-0ac4-4e96-88c6-59ac154db496-kube-api-access-v4jqq\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.728726 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rhfr8"]
Dec 09 15:31:00 crc kubenswrapper[4716]: I1209 15:31:00.742358 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rhfr8"]
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.228820 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d" path="/var/lib/kubelet/pods/21ae3fb4-b9a2-43bd-9ce1-8235dcf4e42d/volumes"
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.259746 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f76b57f8b-ltlwj" podUID="f2721f4c-09da-4cd3-a266-8df2c3aca030" containerName="console" containerID="cri-o://3529d187f9476fc1171f80aae8304a6ec3bd9c2a4c2d383865646d8ece314e67" gracePeriod=15
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.344866 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" event={"ID":"0ea0c2c0-b980-461f-9f94-38eb8630a830","Type":"ContainerStarted","Data":"873fe73894b63b84a74ddca325d178852819e10d2b968d7f95dc8491f010d5e0"}
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.347025 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"67d51dc9-9087-4d11-9f1f-0947af797f5d","Type":"ContainerStarted","Data":"70aa0f2f419a237833487a906a751d183e0130832018d6b04970faf7a4fcc156"}
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.347372 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.349497 4716 generic.go:334] "Generic (PLEG): container finished" podID="6f4610df-19db-432c-805c-2e8b52e5b344" containerID="27a4a9518a046f7dc29e669299f5093da13794eb5f8a14d22f754fe41268eb50" exitCode=0
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.349553 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pf2c7" event={"ID":"6f4610df-19db-432c-805c-2e8b52e5b344","Type":"ContainerDied","Data":"27a4a9518a046f7dc29e669299f5093da13794eb5f8a14d22f754fe41268eb50"}
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.349568 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-h54jn"
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.349751 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-p98qb"
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.369959 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-226pf" podStartSLOduration=11.253958794 podStartE2EDuration="37.36993101s" podCreationTimestamp="2025-12-09 15:30:24 +0000 UTC" firstStartedPulling="2025-12-09 15:30:27.412203197 +0000 UTC m=+1314.566947185" lastFinishedPulling="2025-12-09 15:30:53.528175413 +0000 UTC m=+1340.682919401" observedRunningTime="2025-12-09 15:31:01.360078387 +0000 UTC m=+1348.514822375" watchObservedRunningTime="2025-12-09 15:31:01.36993101 +0000 UTC m=+1348.524674998"
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.493146 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-p98qb"]
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.504969 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-p98qb"]
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.514260 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=6.092423549 podStartE2EDuration="42.514231499s" podCreationTimestamp="2025-12-09 15:30:19 +0000 UTC" firstStartedPulling="2025-12-09 15:30:21.336682504 +0000 UTC m=+1308.491426492" lastFinishedPulling="2025-12-09 15:30:57.758490444 +0000 UTC m=+1344.913234442" observedRunningTime="2025-12-09 15:31:01.458584936 +0000 UTC m=+1348.613328944" watchObservedRunningTime="2025-12-09 15:31:01.514231499 +0000 UTC m=+1348.668975497"
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.533289 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-h54jn"]
Dec 09 15:31:01 crc kubenswrapper[4716]: I1209 15:31:01.541427 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-h54jn"]
Dec 09 15:31:02 crc kubenswrapper[4716]: I1209 15:31:02.362248 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f76b57f8b-ltlwj_f2721f4c-09da-4cd3-a266-8df2c3aca030/console/0.log"
Dec 09 15:31:02 crc kubenswrapper[4716]: I1209 15:31:02.362515 4716 generic.go:334] "Generic (PLEG): container finished" podID="f2721f4c-09da-4cd3-a266-8df2c3aca030" containerID="3529d187f9476fc1171f80aae8304a6ec3bd9c2a4c2d383865646d8ece314e67" exitCode=2
Dec 09 15:31:02 crc kubenswrapper[4716]: I1209 15:31:02.362604 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f76b57f8b-ltlwj" event={"ID":"f2721f4c-09da-4cd3-a266-8df2c3aca030","Type":"ContainerDied","Data":"3529d187f9476fc1171f80aae8304a6ec3bd9c2a4c2d383865646d8ece314e67"}
Dec 09 15:31:02 crc kubenswrapper[4716]: I1209 15:31:02.897488 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f76b57f8b-ltlwj_f2721f4c-09da-4cd3-a266-8df2c3aca030/console/0.log"
Dec 09 15:31:02 crc kubenswrapper[4716]: I1209 15:31:02.898038 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f76b57f8b-ltlwj"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056318 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-service-ca\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056420 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-trusted-ca-bundle\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056504 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-serving-cert\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056558 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbxqz\" (UniqueName: \"kubernetes.io/projected/f2721f4c-09da-4cd3-a266-8df2c3aca030-kube-api-access-rbxqz\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056664 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-config\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056759 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-oauth-serving-cert\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.056862 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-oauth-config\") pod \"f2721f4c-09da-4cd3-a266-8df2c3aca030\" (UID: \"f2721f4c-09da-4cd3-a266-8df2c3aca030\") "
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.057243 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-service-ca" (OuterVolumeSpecName: "service-ca") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.057285 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.057348 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.057364 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-config" (OuterVolumeSpecName: "console-config") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.057440 4716 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-service-ca\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.158949 4716 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.158989 4716 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.159009 4716 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f2721f4c-09da-4cd3-a266-8df2c3aca030-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.225323 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.228644 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.228976 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2721f4c-09da-4cd3-a266-8df2c3aca030-kube-api-access-rbxqz" (OuterVolumeSpecName: "kube-api-access-rbxqz") pod "f2721f4c-09da-4cd3-a266-8df2c3aca030" (UID: "f2721f4c-09da-4cd3-a266-8df2c3aca030"). InnerVolumeSpecName "kube-api-access-rbxqz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.228717 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72871dec-a60a-4e54-86a0-be8f91c3fde3" path="/var/lib/kubelet/pods/72871dec-a60a-4e54-86a0-be8f91c3fde3/volumes"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.232838 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d166dd61-0ac4-4e96-88c6-59ac154db496" path="/var/lib/kubelet/pods/d166dd61-0ac4-4e96-88c6-59ac154db496/volumes"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.265419 4716 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.265482 4716 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f2721f4c-09da-4cd3-a266-8df2c3aca030-console-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.265495 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbxqz\" (UniqueName: \"kubernetes.io/projected/f2721f4c-09da-4cd3-a266-8df2c3aca030-kube-api-access-rbxqz\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:03 crc kubenswrapper[4716]: E1209 15:31:03.296853 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="fa255b9e-51c8-407a-aebe-606da43b9906"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.373312 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f76b57f8b-ltlwj_f2721f4c-09da-4cd3-a266-8df2c3aca030/console/0.log"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.373417 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f76b57f8b-ltlwj" event={"ID":"f2721f4c-09da-4cd3-a266-8df2c3aca030","Type":"ContainerDied","Data":"73046e778b7911ce8a84efe08993412ec9dcf2b8b2bb767f07ad208ef24e5495"}
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.373458 4716 scope.go:117] "RemoveContainer" containerID="3529d187f9476fc1171f80aae8304a6ec3bd9c2a4c2d383865646d8ece314e67"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.373503 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f76b57f8b-ltlwj"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.375884 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"fa255b9e-51c8-407a-aebe-606da43b9906","Type":"ContainerStarted","Data":"626ab76c28cacb2f549b2061abd91051203b2346010e9e08e668718c331aa4cb"}
Dec 09 15:31:03 crc kubenswrapper[4716]: E1209 15:31:03.377525 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="fa255b9e-51c8-407a-aebe-606da43b9906"
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.438121 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f76b57f8b-ltlwj"]
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.446282 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f76b57f8b-ltlwj"]
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.563535 4716 patch_prober.go:28] interesting pod/console-f76b57f8b-ltlwj container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.90:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 09 15:31:03 crc kubenswrapper[4716]: I1209 15:31:03.563642 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f76b57f8b-ltlwj" podUID="f2721f4c-09da-4cd3-a266-8df2c3aca030" containerName="console" probeResult="failure" output="Get \"https://10.217.0.90:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.388903 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7","Type":"ContainerStarted","Data":"c4aab1068fe48f6e595b3b8f7e0e86a72247da10184565dc41eef70c20e6df1f"}
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.389326 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"07a42bcd-0edf-4215-a2ce-e5b66b8c09d7","Type":"ContainerStarted","Data":"fb4265a49b2c67c79102eaef5d422454cc0ff2788a180e8c701ca3a8beed8737"}
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.392246 4716 generic.go:334] "Generic (PLEG): container finished" podID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerID="aa481eeebdcda48756a0218836d53c906c8690e011581e52b182ec8a76808edc" exitCode=0
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.392297 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" event={"ID":"31f45c75-847f-4adc-8bdf-3438bf5f9a4c","Type":"ContainerDied","Data":"aa481eeebdcda48756a0218836d53c906c8690e011581e52b182ec8a76808edc"}
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.397005 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-g6rkt" event={"ID":"1fa68337-088e-40a6-8cc4-3b2b0947f959","Type":"ContainerStarted","Data":"18fcf0d065886928a84e2c5dd774d78b2b3ea5d23c3a78ce77aef5d8ed43179a"}
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.403020 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pf2c7" event={"ID":"6f4610df-19db-432c-805c-2e8b52e5b344","Type":"ContainerStarted","Data":"369c22dfff57407bc7e397c00538aff6bc0c245a417d56935d525ff539934a2d"}
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.403377 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pf2c7" event={"ID":"6f4610df-19db-432c-805c-2e8b52e5b344","Type":"ContainerStarted","Data":"782f07df4de8728d506d4baec4eaf8ca6d569389a3c7c493210645eb179aa6d0"}
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.404035 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-pf2c7"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.404058 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-pf2c7"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.409608 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerStarted","Data":"495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e"}
Dec 09 15:31:04 crc kubenswrapper[4716]: E1209 15:31:04.411639 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="fa255b9e-51c8-407a-aebe-606da43b9906"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.431127 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=34.027861192 podStartE2EDuration="37.431106488s" podCreationTimestamp="2025-12-09 15:30:27 +0000 UTC" firstStartedPulling="2025-12-09 15:30:59.017430169 +0000 UTC m=+1346.172174157" lastFinishedPulling="2025-12-09 15:31:02.420675465 +0000 UTC m=+1349.575419453" observedRunningTime="2025-12-09 15:31:04.421314755 +0000 UTC m=+1351.576058743" watchObservedRunningTime="2025-12-09 15:31:04.431106488 +0000 UTC m=+1351.585850476"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.457842 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-pf2c7" podStartSLOduration=14.466769165 podStartE2EDuration="39.457811527s" podCreationTimestamp="2025-12-09 15:30:25 +0000 UTC" firstStartedPulling="2025-12-09 15:30:32.766955228 +0000 UTC m=+1319.921699216" lastFinishedPulling="2025-12-09 15:30:57.75799759 +0000 UTC m=+1344.912741578" observedRunningTime="2025-12-09 15:31:04.443152845 +0000 UTC m=+1351.597896833" watchObservedRunningTime="2025-12-09 15:31:04.457811527 +0000 UTC m=+1351.612555515"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.505589 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-g6rkt" podStartSLOduration=25.012689235 podStartE2EDuration="27.505556853s" podCreationTimestamp="2025-12-09 15:30:37 +0000 UTC" firstStartedPulling="2025-12-09 15:30:59.985656364 +0000 UTC m=+1347.140400342" lastFinishedPulling="2025-12-09 15:31:02.478523972 +0000 UTC m=+1349.633267960" observedRunningTime="2025-12-09 15:31:04.497194242 +0000 UTC m=+1351.651938230" watchObservedRunningTime="2025-12-09 15:31:04.505556853 +0000 UTC m=+1351.660300841"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.641837 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Dec 09 15:31:04 crc kubenswrapper[4716]: I1209 15:31:04.967569 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-784v8"]
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.030096 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x9c54"]
Dec 09 15:31:05 crc kubenswrapper[4716]: E1209 15:31:05.030640 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2721f4c-09da-4cd3-a266-8df2c3aca030" containerName="console"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.030659 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2721f4c-09da-4cd3-a266-8df2c3aca030" containerName="console"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.030911 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2721f4c-09da-4cd3-a266-8df2c3aca030" containerName="console"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.032282 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.036364 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.041558 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x9c54"]
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.102634 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.102811 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.102842 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5grtd\" (UniqueName: \"kubernetes.io/projected/c63bdfde-4ca8-4048-8508-a7be505e25f0-kube-api-access-5grtd\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.102886 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.102963 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-config\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.205300 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.205411 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-config\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.205457 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.205535 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.205555 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5grtd\" (UniqueName: \"kubernetes.io/projected/c63bdfde-4ca8-4048-8508-a7be505e25f0-kube-api-access-5grtd\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.206380 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.206788 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-config\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.206957 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.207631 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.234159 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5grtd\" (UniqueName: \"kubernetes.io/projected/c63bdfde-4ca8-4048-8508-a7be505e25f0-kube-api-access-5grtd\") pod \"dnsmasq-dns-86db49b7ff-x9c54\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.247653 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2721f4c-09da-4cd3-a266-8df2c3aca030" path="/var/lib/kubelet/pods/f2721f4c-09da-4cd3-a266-8df2c3aca030/volumes"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.350986 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-784v8"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.360127 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54"
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.410564 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh6lb\" (UniqueName: \"kubernetes.io/projected/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-kube-api-access-vh6lb\") pod \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") "
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.410761 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-dns-svc\") pod \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") "
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.410865 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-config\") pod \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\" (UID: \"5e40d00b-b8ee-4cf9-859a-5e92c49182c6\") "
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.411207 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5e40d00b-b8ee-4cf9-859a-5e92c49182c6" (UID: "5e40d00b-b8ee-4cf9-859a-5e92c49182c6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.411661 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-config" (OuterVolumeSpecName: "config") pod "5e40d00b-b8ee-4cf9-859a-5e92c49182c6" (UID: "5e40d00b-b8ee-4cf9-859a-5e92c49182c6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.411776 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.416669 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-kube-api-access-vh6lb" (OuterVolumeSpecName: "kube-api-access-vh6lb") pod "5e40d00b-b8ee-4cf9-859a-5e92c49182c6" (UID: "5e40d00b-b8ee-4cf9-859a-5e92c49182c6"). InnerVolumeSpecName "kube-api-access-vh6lb".
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.424009 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" event={"ID":"31f45c75-847f-4adc-8bdf-3438bf5f9a4c","Type":"ContainerStarted","Data":"3feaedaf98acabce942a90f852214671fafe055ad4ec60b497d1342420742a85"} Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.425329 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.426764 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" event={"ID":"5e40d00b-b8ee-4cf9-859a-5e92c49182c6","Type":"ContainerDied","Data":"c194aaf7b53f06fad91561f99ee5d6a842e6b1753388cf7b52f40e62557e5e3e"} Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.426895 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-784v8" Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.458839 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" podStartSLOduration=25.989944981 podStartE2EDuration="28.458795687s" podCreationTimestamp="2025-12-09 15:30:37 +0000 UTC" firstStartedPulling="2025-12-09 15:30:59.99627244 +0000 UTC m=+1347.151016428" lastFinishedPulling="2025-12-09 15:31:02.465123146 +0000 UTC m=+1349.619867134" observedRunningTime="2025-12-09 15:31:05.442047775 +0000 UTC m=+1352.596791773" watchObservedRunningTime="2025-12-09 15:31:05.458795687 +0000 UTC m=+1352.613539675" Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.514955 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.514988 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh6lb\" (UniqueName: \"kubernetes.io/projected/5e40d00b-b8ee-4cf9-859a-5e92c49182c6-kube-api-access-vh6lb\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.582076 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-784v8"] Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.596961 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-784v8"] Dec 09 15:31:05 crc kubenswrapper[4716]: I1209 15:31:05.797310 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x9c54"] Dec 09 15:31:05 crc kubenswrapper[4716]: W1209 15:31:05.803813 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc63bdfde_4ca8_4048_8508_a7be505e25f0.slice/crio-9eda59012491fa7ad02d3eee24e746d723ee13e4b24dabc311b0ba6842d05118 WatchSource:0}: Error finding container 9eda59012491fa7ad02d3eee24e746d723ee13e4b24dabc311b0ba6842d05118: Status 404 returned error can't find the container with id 9eda59012491fa7ad02d3eee24e746d723ee13e4b24dabc311b0ba6842d05118 Dec 09 15:31:06 crc kubenswrapper[4716]: I1209 15:31:06.437697 4716 generic.go:334] "Generic (PLEG): container finished" podID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerID="6107c84b96e6d73a3e1258a1173b780c1f6462395dfba1b85138b22ee1397ef3" exitCode=0 Dec 09 15:31:06 crc kubenswrapper[4716]: I1209 
15:31:06.437802 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" event={"ID":"c63bdfde-4ca8-4048-8508-a7be505e25f0","Type":"ContainerDied","Data":"6107c84b96e6d73a3e1258a1173b780c1f6462395dfba1b85138b22ee1397ef3"} Dec 09 15:31:06 crc kubenswrapper[4716]: I1209 15:31:06.438980 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" event={"ID":"c63bdfde-4ca8-4048-8508-a7be505e25f0","Type":"ContainerStarted","Data":"9eda59012491fa7ad02d3eee24e746d723ee13e4b24dabc311b0ba6842d05118"} Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.226710 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e40d00b-b8ee-4cf9-859a-5e92c49182c6" path="/var/lib/kubelet/pods/5e40d00b-b8ee-4cf9-859a-5e92c49182c6/volumes" Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.450057 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" event={"ID":"c63bdfde-4ca8-4048-8508-a7be505e25f0","Type":"ContainerStarted","Data":"90c97b90195b5283b8b542647d98eee50d83575e9d1f074ac6c9335b2d409b03"} Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.450569 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.452266 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c32f00d5-f870-4dc6-8387-81bfe37b06f8","Type":"ContainerStarted","Data":"eb2635ef1f6c89d668bf420f9c0351a17fcbd7f9636d6b064be741df939db47f"} Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.488418 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" podStartSLOduration=3.488388762 podStartE2EDuration="3.488388762s" podCreationTimestamp="2025-12-09 15:31:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:31:07.479329621 +0000 UTC m=+1354.634073619" watchObservedRunningTime="2025-12-09 15:31:07.488388762 +0000 UTC m=+1354.643132750" Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.682583 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 09 15:31:07 crc kubenswrapper[4716]: I1209 15:31:07.683144 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 09 15:31:08 crc kubenswrapper[4716]: I1209 15:31:08.507988 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 09 15:31:09 crc kubenswrapper[4716]: I1209 15:31:09.460048 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 09 15:31:09 crc kubenswrapper[4716]: I1209 15:31:09.475569 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b0be2722-84d6-4885-80bc-a795d7f2c05e","Type":"ContainerStarted","Data":"d9bf61dbefbb3f26a44112bfd21f88ec9ceea6753133d7630a743d78b7714a67"} Dec 09 15:31:10 crc kubenswrapper[4716]: I1209 15:31:10.485923 4716 generic.go:334] "Generic (PLEG): container finished" podID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerID="495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e" exitCode=0 Dec 09 15:31:10 crc kubenswrapper[4716]: I1209 15:31:10.486024 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerDied","Data":"495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e"} Dec 09 15:31:10 crc kubenswrapper[4716]: I1209 15:31:10.489154 4716 generic.go:334] "Generic (PLEG): container finished" podID="c32f00d5-f870-4dc6-8387-81bfe37b06f8" containerID="eb2635ef1f6c89d668bf420f9c0351a17fcbd7f9636d6b064be741df939db47f" exitCode=0 Dec 09 15:31:10 crc kubenswrapper[4716]: I1209 15:31:10.489219 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c32f00d5-f870-4dc6-8387-81bfe37b06f8","Type":"ContainerDied","Data":"eb2635ef1f6c89d668bf420f9c0351a17fcbd7f9636d6b064be741df939db47f"} Dec 09 15:31:11 crc kubenswrapper[4716]: I1209 15:31:11.499448 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2e140762-44f7-46f9-9bbe-a8f780186869","Type":"ContainerStarted","Data":"30392210ad09dadc4c995e968b32b6d395c64e8fffbf6808673e45e67027cea0"} Dec 09 15:31:11 crc kubenswrapper[4716]: I1209 15:31:11.501803 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f417726f-0022-42f5-bfe8-79f6605d557c","Type":"ContainerStarted","Data":"ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c"} Dec 09 15:31:11 crc kubenswrapper[4716]: I1209 15:31:11.503778 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c32f00d5-f870-4dc6-8387-81bfe37b06f8","Type":"ContainerStarted","Data":"b65adf14c7bcad248008fabeba2ef845cdd8e10fd848d62fd07b7e5b3d62e607"} Dec 09 15:31:11 crc kubenswrapper[4716]: I1209 15:31:11.582229 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=20.156110424 podStartE2EDuration="1m6.582203401s" podCreationTimestamp="2025-12-09 15:30:05 +0000 UTC" firstStartedPulling="2025-12-09 15:30:20.322073481 +0000 UTC m=+1307.476817469" lastFinishedPulling="2025-12-09 15:31:06.748166468 +0000 UTC m=+1353.902910446" observedRunningTime="2025-12-09 15:31:11.573511301 +0000 UTC m=+1358.728255299" watchObservedRunningTime="2025-12-09 15:31:11.582203401 +0000 UTC m=+1358.736947389" Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.513525 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8cd62374-51f5-4445-94f2-3d575475d8e6","Type":"ContainerStarted","Data":"4964cb161a626512ca09fa63000fe7db72ca7e2703ce041ec0e5040d3fe0dc32"} Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.514092 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.517883 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8mm7h" event={"ID":"890cdf81-6c51-4954-a7fc-ea6116941cfe","Type":"ContainerStarted","Data":"0e105323c3315e826a15b5b9e6df3985ff0b2495240d2d87a56e6bcd4ece898b"} Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.518033 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-8mm7h" Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.524250 4716 generic.go:334] "Generic (PLEG): container finished" podID="b0be2722-84d6-4885-80bc-a795d7f2c05e" containerID="d9bf61dbefbb3f26a44112bfd21f88ec9ceea6753133d7630a743d78b7714a67" exitCode=0 Dec 09 15:31:12 crc 
kubenswrapper[4716]: I1209 15:31:12.524302 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b0be2722-84d6-4885-80bc-a795d7f2c05e","Type":"ContainerDied","Data":"d9bf61dbefbb3f26a44112bfd21f88ec9ceea6753133d7630a743d78b7714a67"} Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.533040 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=4.692759914 podStartE2EDuration="49.533017655s" podCreationTimestamp="2025-12-09 15:30:23 +0000 UTC" firstStartedPulling="2025-12-09 15:30:26.746919954 +0000 UTC m=+1313.901663932" lastFinishedPulling="2025-12-09 15:31:11.587177685 +0000 UTC m=+1358.741921673" observedRunningTime="2025-12-09 15:31:12.52866503 +0000 UTC m=+1359.683409018" watchObservedRunningTime="2025-12-09 15:31:12.533017655 +0000 UTC m=+1359.687761643" Dec 09 15:31:12 crc kubenswrapper[4716]: I1209 15:31:12.556100 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8mm7h" podStartSLOduration=4.011250344 podStartE2EDuration="47.55607852s" podCreationTimestamp="2025-12-09 15:30:25 +0000 UTC" firstStartedPulling="2025-12-09 15:30:28.18985035 +0000 UTC m=+1315.344594338" lastFinishedPulling="2025-12-09 15:31:11.734678526 +0000 UTC m=+1358.889422514" observedRunningTime="2025-12-09 15:31:12.543345543 +0000 UTC m=+1359.698089531" watchObservedRunningTime="2025-12-09 15:31:12.55607852 +0000 UTC m=+1359.710822508" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.276790 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.548037 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b0be2722-84d6-4885-80bc-a795d7f2c05e","Type":"ContainerStarted","Data":"96bb8557526c725e135ba65e8274aa8c4b9ad5a6282a4eb839994ac73c064bd1"} Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.591794 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371963.263014 podStartE2EDuration="1m13.591762589s" podCreationTimestamp="2025-12-09 15:30:00 +0000 UTC" firstStartedPulling="2025-12-09 15:30:10.419271238 +0000 UTC m=+1297.574015226" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:31:13.580402042 +0000 UTC m=+1360.735146030" watchObservedRunningTime="2025-12-09 15:31:13.591762589 +0000 UTC m=+1360.746506577" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.667856 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.668084 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.693194 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x9c54"] Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.693448 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="dnsmasq-dns" containerID="cri-o://90c97b90195b5283b8b542647d98eee50d83575e9d1f074ac6c9335b2d409b03" gracePeriod=10 Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.697004 4716 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.753959 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-nwgsw"] Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.758942 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.790869 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nwgsw"] Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.815368 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-dns-svc\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.815781 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d75v\" (UniqueName: \"kubernetes.io/projected/5e0192ce-fa22-42ad-9c07-5154aa5b3801-kube-api-access-8d75v\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.815956 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-config\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.816178 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.816300 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.924385 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d75v\" (UniqueName: \"kubernetes.io/projected/5e0192ce-fa22-42ad-9c07-5154aa5b3801-kube-api-access-8d75v\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.924819 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-config\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.924962 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.925012 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.925076 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-dns-svc\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.927490 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-config\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.927490 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.930914 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-dns-svc\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.930925 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:13 crc kubenswrapper[4716]: I1209 15:31:13.968662 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d75v\" (UniqueName: \"kubernetes.io/projected/5e0192ce-fa22-42ad-9c07-5154aa5b3801-kube-api-access-8d75v\") pod \"dnsmasq-dns-698758b865-nwgsw\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.106291 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.559469 4716 generic.go:334] "Generic (PLEG): container finished" podID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerID="90c97b90195b5283b8b542647d98eee50d83575e9d1f074ac6c9335b2d409b03" exitCode=0 Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.559538 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" event={"ID":"c63bdfde-4ca8-4048-8508-a7be505e25f0","Type":"ContainerDied","Data":"90c97b90195b5283b8b542647d98eee50d83575e9d1f074ac6c9335b2d409b03"} Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.671187 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nwgsw"] Dec 09 15:31:14 crc kubenswrapper[4716]: W1209 15:31:14.681898 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e0192ce_fa22_42ad_9c07_5154aa5b3801.slice/crio-70ceb9de9d224e1acc5b4ab6f7fa8104ac749502830c495198e31caba5c077f2 WatchSource:0}: Error finding container 70ceb9de9d224e1acc5b4ab6f7fa8104ac749502830c495198e31caba5c077f2: Status 404 returned error can't find the container with id 70ceb9de9d224e1acc5b4ab6f7fa8104ac749502830c495198e31caba5c077f2 Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.863260 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.870570 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.874445 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.874587 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-629xf" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.874601 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.877332 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 09 15:31:14 crc kubenswrapper[4716]: I1209 15:31:14.883708 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.052548 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.052695 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrnbf\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-kube-api-access-wrnbf\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.052854 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/552d079e-332b-46db-946b-2777875f3dc7-lock\") pod \"swift-storage-0\" (UID: 
\"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.052974 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/552d079e-332b-46db-946b-2777875f3dc7-cache\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.053014 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.154523 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.154588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrnbf\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-kube-api-access-wrnbf\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.154676 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/552d079e-332b-46db-946b-2777875f3dc7-lock\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.154721 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/552d079e-332b-46db-946b-2777875f3dc7-cache\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.154743 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: E1209 15:31:15.154810 4716 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 15:31:15 crc kubenswrapper[4716]: E1209 15:31:15.154844 4716 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 15:31:15 crc kubenswrapper[4716]: E1209 15:31:15.154907 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift podName:552d079e-332b-46db-946b-2777875f3dc7 nodeName:}" failed. No retries permitted until 2025-12-09 15:31:15.654887771 +0000 UTC m=+1362.809631759 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift") pod "swift-storage-0" (UID: "552d079e-332b-46db-946b-2777875f3dc7") : configmap "swift-ring-files" not found Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.155135 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.155405 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/552d079e-332b-46db-946b-2777875f3dc7-cache\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.155458 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/552d079e-332b-46db-946b-2777875f3dc7-lock\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.179440 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrnbf\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-kube-api-access-wrnbf\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.193712 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.333579 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-hkncs"] Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.335006 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.339706 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.339837 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.339904 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.348092 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-hkncs"] Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.361214 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.144:5353: connect: connection refused" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462475 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-swiftconf\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462549 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-ring-data-devices\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462598 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-combined-ca-bundle\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462681 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5b9ed832-324f-4995-8277-6e979cfc7bc1-etc-swift\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462710 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx6dx\" (UniqueName: \"kubernetes.io/projected/5b9ed832-324f-4995-8277-6e979cfc7bc1-kube-api-access-tx6dx\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462793 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-scripts\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.462891 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-dispersionconf\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564251 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-swiftconf\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564299 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-ring-data-devices\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564342 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-combined-ca-bundle\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564406 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5b9ed832-324f-4995-8277-6e979cfc7bc1-etc-swift\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564435 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx6dx\" (UniqueName: \"kubernetes.io/projected/5b9ed832-324f-4995-8277-6e979cfc7bc1-kube-api-access-tx6dx\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564480 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-scripts\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.564596 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-dispersionconf\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.566155 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5b9ed832-324f-4995-8277-6e979cfc7bc1-etc-swift\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.566506 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-scripts\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.566534 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-ring-data-devices\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.568273 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-dispersionconf\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.569259 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-swiftconf\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.570050 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-combined-ca-bundle\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.571701 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nwgsw" event={"ID":"5e0192ce-fa22-42ad-9c07-5154aa5b3801","Type":"ContainerStarted","Data":"70ceb9de9d224e1acc5b4ab6f7fa8104ac749502830c495198e31caba5c077f2"} Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.586469 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx6dx\" (UniqueName: \"kubernetes.io/projected/5b9ed832-324f-4995-8277-6e979cfc7bc1-kube-api-access-tx6dx\") pod \"swift-ring-rebalance-hkncs\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.660683 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:15 crc kubenswrapper[4716]: I1209 15:31:15.666163 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:15 crc kubenswrapper[4716]: E1209 15:31:15.666559 4716 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 15:31:15 crc kubenswrapper[4716]: E1209 15:31:15.666684 4716 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 15:31:15 crc kubenswrapper[4716]: E1209 15:31:15.666808 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift podName:552d079e-332b-46db-946b-2777875f3dc7 nodeName:}" failed. No retries permitted until 2025-12-09 15:31:16.666784204 +0000 UTC m=+1363.821528252 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift") pod "swift-storage-0" (UID: "552d079e-332b-46db-946b-2777875f3dc7") : configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:16.688543 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:18 crc kubenswrapper[4716]: E1209 15:31:16.688793 4716 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: E1209 15:31:16.688961 4716 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: E1209 15:31:16.689032 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift podName:552d079e-332b-46db-946b-2777875f3dc7 nodeName:}" failed. No retries permitted until 2025-12-09 15:31:18.689009056 +0000 UTC m=+1365.843753044 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift") pod "swift-storage-0" (UID: "552d079e-332b-46db-946b-2777875f3dc7") : configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:17.590084 4716 generic.go:334] "Generic (PLEG): container finished" podID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerID="89f8649d8de36b3186a92b11c3e5e0c4155a8670ed25422646bdcfed91d890f0" exitCode=0 Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:17.590183 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nwgsw" event={"ID":"5e0192ce-fa22-42ad-9c07-5154aa5b3801","Type":"ContainerDied","Data":"89f8649d8de36b3186a92b11c3e5e0c4155a8670ed25422646bdcfed91d890f0"} Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:18.524388 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-hkncs"] Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:18.745052 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:18 crc kubenswrapper[4716]: E1209 15:31:18.745295 4716 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: E1209 15:31:18.745327 4716 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: E1209 15:31:18.745392 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift podName:552d079e-332b-46db-946b-2777875f3dc7 nodeName:}" failed. No retries permitted until 2025-12-09 15:31:22.745371593 +0000 UTC m=+1369.900115581 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift") pod "swift-storage-0" (UID: "552d079e-332b-46db-946b-2777875f3dc7") : configmap "swift-ring-files" not found Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:18.891674 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Dec 09 15:31:18 crc kubenswrapper[4716]: I1209 15:31:18.891731 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 09 15:31:19 crc kubenswrapper[4716]: I1209 15:31:19.847471 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 09 15:31:19 crc kubenswrapper[4716]: I1209 15:31:19.967121 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.415023 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.529438 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-sb\") pod \"c63bdfde-4ca8-4048-8508-a7be505e25f0\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.529668 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-config\") pod \"c63bdfde-4ca8-4048-8508-a7be505e25f0\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.529757 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5grtd\" (UniqueName: \"kubernetes.io/projected/c63bdfde-4ca8-4048-8508-a7be505e25f0-kube-api-access-5grtd\") pod \"c63bdfde-4ca8-4048-8508-a7be505e25f0\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.529786 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-dns-svc\") pod \"c63bdfde-4ca8-4048-8508-a7be505e25f0\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.529823 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-nb\") pod \"c63bdfde-4ca8-4048-8508-a7be505e25f0\" (UID: \"c63bdfde-4ca8-4048-8508-a7be505e25f0\") " Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.533330 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c63bdfde-4ca8-4048-8508-a7be505e25f0-kube-api-access-5grtd" (OuterVolumeSpecName: "kube-api-access-5grtd") pod "c63bdfde-4ca8-4048-8508-a7be505e25f0" (UID: "c63bdfde-4ca8-4048-8508-a7be505e25f0"). InnerVolumeSpecName "kube-api-access-5grtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.580864 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c63bdfde-4ca8-4048-8508-a7be505e25f0" (UID: "c63bdfde-4ca8-4048-8508-a7be505e25f0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.581482 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c63bdfde-4ca8-4048-8508-a7be505e25f0" (UID: "c63bdfde-4ca8-4048-8508-a7be505e25f0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.584542 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c63bdfde-4ca8-4048-8508-a7be505e25f0" (UID: "c63bdfde-4ca8-4048-8508-a7be505e25f0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.585258 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-config" (OuterVolumeSpecName: "config") pod "c63bdfde-4ca8-4048-8508-a7be505e25f0" (UID: "c63bdfde-4ca8-4048-8508-a7be505e25f0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.632451 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.632483 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.632492 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.632503 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5grtd\" (UniqueName: \"kubernetes.io/projected/c63bdfde-4ca8-4048-8508-a7be505e25f0-kube-api-access-5grtd\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.632515 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c63bdfde-4ca8-4048-8508-a7be505e25f0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.635184 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hkncs" event={"ID":"5b9ed832-324f-4995-8277-6e979cfc7bc1","Type":"ContainerStarted","Data":"c7d2040cf1da931d9c97382985334476be045d920ec8005b7119d866487b01a0"} Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.637430 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" event={"ID":"c63bdfde-4ca8-4048-8508-a7be505e25f0","Type":"ContainerDied","Data":"9eda59012491fa7ad02d3eee24e746d723ee13e4b24dabc311b0ba6842d05118"} Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.637487 4716 scope.go:117] "RemoveContainer" containerID="90c97b90195b5283b8b542647d98eee50d83575e9d1f074ac6c9335b2d409b03" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.637527 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.680638 4716 scope.go:117] "RemoveContainer" containerID="6107c84b96e6d73a3e1258a1173b780c1f6462395dfba1b85138b22ee1397ef3" Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.682091 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x9c54"] Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.692762 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x9c54"] Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.836288 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:22 crc kubenswrapper[4716]: E1209 15:31:22.836502 4716 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 15:31:22 crc kubenswrapper[4716]: E1209 15:31:22.836536 4716 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 15:31:22 crc kubenswrapper[4716]: E1209 15:31:22.836601 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift podName:552d079e-332b-46db-946b-2777875f3dc7 nodeName:}" failed. No retries permitted until 2025-12-09 15:31:30.836581227 +0000 UTC m=+1377.991325215 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift") pod "swift-storage-0" (UID: "552d079e-332b-46db-946b-2777875f3dc7") : configmap "swift-ring-files" not found Dec 09 15:31:22 crc kubenswrapper[4716]: I1209 15:31:22.978568 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.057434 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.228242 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" path="/var/lib/kubelet/pods/c63bdfde-4ca8-4048-8508-a7be505e25f0/volumes" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.649352 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerStarted","Data":"3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76"} Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.652837 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"fa255b9e-51c8-407a-aebe-606da43b9906","Type":"ContainerStarted","Data":"80160af254fc963c7cc7f85a2c1242fff158d6b6c48cc3d5eddafe79cef7434d"} Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.655923 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nwgsw" event={"ID":"5e0192ce-fa22-42ad-9c07-5154aa5b3801","Type":"ContainerStarted","Data":"8383510e1ab5a6016d5b78a2724e66be30548fb6416cf327c09930c84aa90b98"} Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.682942 4716 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=9.035698016 podStartE2EDuration="58.68291708s" podCreationTimestamp="2025-12-09 15:30:25 +0000 UTC" firstStartedPulling="2025-12-09 15:30:32.764128917 +0000 UTC m=+1319.918872905" lastFinishedPulling="2025-12-09 15:31:22.411347971 +0000 UTC m=+1369.566091969" observedRunningTime="2025-12-09 15:31:23.670016498 +0000 UTC m=+1370.824760486" watchObservedRunningTime="2025-12-09 15:31:23.68291708 +0000 UTC m=+1370.837661068" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.697743 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-nwgsw" podStartSLOduration=10.697719407 podStartE2EDuration="10.697719407s" podCreationTimestamp="2025-12-09 15:31:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:31:23.686568485 +0000 UTC m=+1370.841312483" watchObservedRunningTime="2025-12-09 15:31:23.697719407 +0000 UTC m=+1370.852463395" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.923915 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-rchbw"] Dec 09 15:31:23 crc kubenswrapper[4716]: E1209 15:31:23.924453 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="dnsmasq-dns" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.924470 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="dnsmasq-dns" Dec 09 15:31:23 crc kubenswrapper[4716]: E1209 15:31:23.924481 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="init" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.924488 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="init" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.925536 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="dnsmasq-dns" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.927885 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:23 crc kubenswrapper[4716]: I1209 15:31:23.955558 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-rchbw"] Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.040861 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.048503 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0e28-account-create-update-xwvcx"] Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.051089 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.060886 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7rxx\" (UniqueName: \"kubernetes.io/projected/b12de1dd-ac2d-4a05-af7d-f675b007109d-kube-api-access-g7rxx\") pod \"mysqld-exporter-openstack-db-create-rchbw\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.061163 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b12de1dd-ac2d-4a05-af7d-f675b007109d-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-rchbw\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.062513 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.062517 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0e28-account-create-update-xwvcx"] Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.107139 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.163115 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26vt7\" (UniqueName: \"kubernetes.io/projected/20c9ed62-b26d-4c7e-b737-057d5afda2da-kube-api-access-26vt7\") pod \"mysqld-exporter-0e28-account-create-update-xwvcx\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.163277 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7rxx\" (UniqueName: \"kubernetes.io/projected/b12de1dd-ac2d-4a05-af7d-f675b007109d-kube-api-access-g7rxx\") pod \"mysqld-exporter-openstack-db-create-rchbw\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.163331 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/20c9ed62-b26d-4c7e-b737-057d5afda2da-operator-scripts\") pod \"mysqld-exporter-0e28-account-create-update-xwvcx\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.163383 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b12de1dd-ac2d-4a05-af7d-f675b007109d-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-rchbw\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.165142 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b12de1dd-ac2d-4a05-af7d-f675b007109d-operator-scripts\") pod 
\"mysqld-exporter-openstack-db-create-rchbw\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.184424 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7rxx\" (UniqueName: \"kubernetes.io/projected/b12de1dd-ac2d-4a05-af7d-f675b007109d-kube-api-access-g7rxx\") pod \"mysqld-exporter-openstack-db-create-rchbw\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.251872 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.265726 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/20c9ed62-b26d-4c7e-b737-057d5afda2da-operator-scripts\") pod \"mysqld-exporter-0e28-account-create-update-xwvcx\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.265884 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26vt7\" (UniqueName: \"kubernetes.io/projected/20c9ed62-b26d-4c7e-b737-057d5afda2da-kube-api-access-26vt7\") pod \"mysqld-exporter-0e28-account-create-update-xwvcx\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.266534 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/20c9ed62-b26d-4c7e-b737-057d5afda2da-operator-scripts\") pod \"mysqld-exporter-0e28-account-create-update-xwvcx\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.285370 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26vt7\" (UniqueName: \"kubernetes.io/projected/20c9ed62-b26d-4c7e-b737-057d5afda2da-kube-api-access-26vt7\") pod \"mysqld-exporter-0e28-account-create-update-xwvcx\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.378714 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:24 crc kubenswrapper[4716]: I1209 15:31:24.578592 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 09 15:31:25 crc kubenswrapper[4716]: I1209 15:31:25.361504 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-x9c54" podUID="c63bdfde-4ca8-4048-8508-a7be505e25f0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.144:5353: i/o timeout" Dec 09 15:31:26 crc kubenswrapper[4716]: I1209 15:31:26.719968 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerStarted","Data":"73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c"} Dec 09 15:31:26 crc kubenswrapper[4716]: I1209 15:31:26.990063 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0e28-account-create-update-xwvcx"] Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.041799 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.088453 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-rchbw"] Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.089544 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 09 15:31:27 crc kubenswrapper[4716]: W1209 15:31:27.104516 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb12de1dd_ac2d_4a05_af7d_f675b007109d.slice/crio-4f7ce0e3bff34ae97fc2171fd314fcb081a3d037aad82d8b1f96c35c623aadb6 WatchSource:0}: Error finding container 4f7ce0e3bff34ae97fc2171fd314fcb081a3d037aad82d8b1f96c35c623aadb6: Status 404 returned error can't find the container with id 4f7ce0e3bff34ae97fc2171fd314fcb081a3d037aad82d8b1f96c35c623aadb6 Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.730241 4716 generic.go:334] "Generic (PLEG): container finished" podID="20c9ed62-b26d-4c7e-b737-057d5afda2da" containerID="c473a760322080d2abe30272df5e8cc64353825f9a1e2fa7f97a0cd8aed3def2" exitCode=0 Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.730564 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" event={"ID":"20c9ed62-b26d-4c7e-b737-057d5afda2da","Type":"ContainerDied","Data":"c473a760322080d2abe30272df5e8cc64353825f9a1e2fa7f97a0cd8aed3def2"} Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.730692 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" event={"ID":"20c9ed62-b26d-4c7e-b737-057d5afda2da","Type":"ContainerStarted","Data":"7c08bfac1be2e25c063e35a4da91a52bbae79fef8fffdf622b4cdb1f59bc41cf"} Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.736196 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hkncs" event={"ID":"5b9ed832-324f-4995-8277-6e979cfc7bc1","Type":"ContainerStarted","Data":"1fce9b9f0b4c94a5b3fecc856db4eb48e4c6d27005b576d452b4f50f371a97b9"} Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.739164 4716 generic.go:334] "Generic (PLEG): container finished" podID="b12de1dd-ac2d-4a05-af7d-f675b007109d" 
containerID="f853614d4a6d711d88bcc719f44903caefb4a2897b919bb3037953a2ce6fb047" exitCode=0 Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.740013 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" event={"ID":"b12de1dd-ac2d-4a05-af7d-f675b007109d","Type":"ContainerDied","Data":"f853614d4a6d711d88bcc719f44903caefb4a2897b919bb3037953a2ce6fb047"} Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.740042 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" event={"ID":"b12de1dd-ac2d-4a05-af7d-f675b007109d","Type":"ContainerStarted","Data":"4f7ce0e3bff34ae97fc2171fd314fcb081a3d037aad82d8b1f96c35c623aadb6"} Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.784015 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-hkncs" podStartSLOduration=8.618643026 podStartE2EDuration="12.783989087s" podCreationTimestamp="2025-12-09 15:31:15 +0000 UTC" firstStartedPulling="2025-12-09 15:31:22.398675566 +0000 UTC m=+1369.553419554" lastFinishedPulling="2025-12-09 15:31:26.564021627 +0000 UTC m=+1373.718765615" observedRunningTime="2025-12-09 15:31:27.767030988 +0000 UTC m=+1374.921774976" watchObservedRunningTime="2025-12-09 15:31:27.783989087 +0000 UTC m=+1374.938733075" Dec 09 15:31:27 crc kubenswrapper[4716]: I1209 15:31:27.797604 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:27.991615 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:27.994031 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.004923 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.005237 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-zhmqz" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.006108 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.006289 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.058166 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.166876 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fa17075-24ea-4f90-9c45-8444a101b2ce-config\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.166937 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.166961 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.166997 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fa17075-24ea-4f90-9c45-8444a101b2ce-scripts\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.167022 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fcct\" (UniqueName: \"kubernetes.io/projected/7fa17075-24ea-4f90-9c45-8444a101b2ce-kube-api-access-2fcct\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.167050 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.167075 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7fa17075-24ea-4f90-9c45-8444a101b2ce-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: 
I1209 15:31:28.268757 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.268815 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.268872 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fa17075-24ea-4f90-9c45-8444a101b2ce-scripts\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.268906 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fcct\" (UniqueName: \"kubernetes.io/projected/7fa17075-24ea-4f90-9c45-8444a101b2ce-kube-api-access-2fcct\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.268952 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.268992 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7fa17075-24ea-4f90-9c45-8444a101b2ce-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.269166 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fa17075-24ea-4f90-9c45-8444a101b2ce-config\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.270214 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7fa17075-24ea-4f90-9c45-8444a101b2ce-config\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.270338 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7fa17075-24ea-4f90-9c45-8444a101b2ce-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.270663 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7fa17075-24ea-4f90-9c45-8444a101b2ce-scripts\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.276593 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.280281 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.297698 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7fa17075-24ea-4f90-9c45-8444a101b2ce-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.313419 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fcct\" (UniqueName: \"kubernetes.io/projected/7fa17075-24ea-4f90-9c45-8444a101b2ce-kube-api-access-2fcct\") pod \"ovn-northd-0\" (UID: \"7fa17075-24ea-4f90-9c45-8444a101b2ce\") " pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.369572 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.956549 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.990802 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-qnztr"] Dec 09 15:31:28 crc kubenswrapper[4716]: I1209 15:31:28.992653 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.000648 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-qnztr"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.092246 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-operator-scripts\") pod \"keystone-db-create-qnztr\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") " pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.092855 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8r9p\" (UniqueName: \"kubernetes.io/projected/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-kube-api-access-h8r9p\") pod \"keystone-db-create-qnztr\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") " pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.108794 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.196876 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-operator-scripts\") pod \"keystone-db-create-qnztr\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") " pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.197102 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8r9p\" (UniqueName: \"kubernetes.io/projected/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-kube-api-access-h8r9p\") pod \"keystone-db-create-qnztr\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") " pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.198218 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-l6zl8"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.198585 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerName="dnsmasq-dns" containerID="cri-o://3feaedaf98acabce942a90f852214671fafe055ad4ec60b497d1342420742a85" gracePeriod=10 Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.198778 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-operator-scripts\") pod \"keystone-db-create-qnztr\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") " pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.233694 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8r9p\" (UniqueName: \"kubernetes.io/projected/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-kube-api-access-h8r9p\") pod \"keystone-db-create-qnztr\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") " pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.291051 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-a78b-account-create-update-mhkd2"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.292723 4716 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.307195 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a78b-account-create-update-mhkd2"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.312088 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.353119 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-qnztr" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.374647 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-zftdd"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.376352 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.398870 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zftdd"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.410742 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed32c8a-48cc-4162-93b6-7830c131a586-operator-scripts\") pod \"keystone-a78b-account-create-update-mhkd2\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.410852 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txgfj\" (UniqueName: \"kubernetes.io/projected/eed32c8a-48cc-4162-93b6-7830c131a586-kube-api-access-txgfj\") pod \"keystone-a78b-account-create-update-mhkd2\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.535307 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-operator-scripts\") pod \"placement-db-create-zftdd\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.535372 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65qkz\" (UniqueName: \"kubernetes.io/projected/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-kube-api-access-65qkz\") pod \"placement-db-create-zftdd\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.535463 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed32c8a-48cc-4162-93b6-7830c131a586-operator-scripts\") pod \"keystone-a78b-account-create-update-mhkd2\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.535566 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txgfj\" (UniqueName: \"kubernetes.io/projected/eed32c8a-48cc-4162-93b6-7830c131a586-kube-api-access-txgfj\") pod 
\"keystone-a78b-account-create-update-mhkd2\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.536771 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed32c8a-48cc-4162-93b6-7830c131a586-operator-scripts\") pod \"keystone-a78b-account-create-update-mhkd2\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.563899 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.580294 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txgfj\" (UniqueName: \"kubernetes.io/projected/eed32c8a-48cc-4162-93b6-7830c131a586-kube-api-access-txgfj\") pod \"keystone-a78b-account-create-update-mhkd2\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.611084 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-10b6-account-create-update-gnpg9"] Dec 09 15:31:29 crc kubenswrapper[4716]: E1209 15:31:29.611546 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20c9ed62-b26d-4c7e-b737-057d5afda2da" containerName="mariadb-account-create-update" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.611570 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="20c9ed62-b26d-4c7e-b737-057d5afda2da" containerName="mariadb-account-create-update" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.611906 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="20c9ed62-b26d-4c7e-b737-057d5afda2da" containerName="mariadb-account-create-update" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.613106 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.616901 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.633830 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-10b6-account-create-update-gnpg9"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.636740 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26vt7\" (UniqueName: \"kubernetes.io/projected/20c9ed62-b26d-4c7e-b737-057d5afda2da-kube-api-access-26vt7\") pod \"20c9ed62-b26d-4c7e-b737-057d5afda2da\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.636813 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/20c9ed62-b26d-4c7e-b737-057d5afda2da-operator-scripts\") pod \"20c9ed62-b26d-4c7e-b737-057d5afda2da\" (UID: \"20c9ed62-b26d-4c7e-b737-057d5afda2da\") " Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.638607 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f8150e8-0d21-4e44-bfda-32a724caf6ad-operator-scripts\") pod \"placement-10b6-account-create-update-gnpg9\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.639450 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtldx\" (UniqueName: \"kubernetes.io/projected/0f8150e8-0d21-4e44-bfda-32a724caf6ad-kube-api-access-jtldx\") pod \"placement-10b6-account-create-update-gnpg9\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.639675 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-operator-scripts\") pod \"placement-db-create-zftdd\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.639743 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65qkz\" (UniqueName: \"kubernetes.io/projected/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-kube-api-access-65qkz\") pod \"placement-db-create-zftdd\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.640680 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-operator-scripts\") pod \"placement-db-create-zftdd\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.640725 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20c9ed62-b26d-4c7e-b737-057d5afda2da-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "20c9ed62-b26d-4c7e-b737-057d5afda2da" (UID: 
"20c9ed62-b26d-4c7e-b737-057d5afda2da"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.641657 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.660340 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20c9ed62-b26d-4c7e-b737-057d5afda2da-kube-api-access-26vt7" (OuterVolumeSpecName: "kube-api-access-26vt7") pod "20c9ed62-b26d-4c7e-b737-057d5afda2da" (UID: "20c9ed62-b26d-4c7e-b737-057d5afda2da"). InnerVolumeSpecName "kube-api-access-26vt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.677490 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65qkz\" (UniqueName: \"kubernetes.io/projected/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-kube-api-access-65qkz\") pod \"placement-db-create-zftdd\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.681376 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-wgl8z"] Dec 09 15:31:29 crc kubenswrapper[4716]: E1209 15:31:29.681849 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b12de1dd-ac2d-4a05-af7d-f675b007109d" containerName="mariadb-database-create" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.681868 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b12de1dd-ac2d-4a05-af7d-f675b007109d" containerName="mariadb-database-create" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.682112 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="b12de1dd-ac2d-4a05-af7d-f675b007109d" containerName="mariadb-database-create" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.682868 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.711059 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-wgl8z"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.742298 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtldx\" (UniqueName: \"kubernetes.io/projected/0f8150e8-0d21-4e44-bfda-32a724caf6ad-kube-api-access-jtldx\") pod \"placement-10b6-account-create-update-gnpg9\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.742477 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f8150e8-0d21-4e44-bfda-32a724caf6ad-operator-scripts\") pod \"placement-10b6-account-create-update-gnpg9\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.745513 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26vt7\" (UniqueName: \"kubernetes.io/projected/20c9ed62-b26d-4c7e-b737-057d5afda2da-kube-api-access-26vt7\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.745666 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/20c9ed62-b26d-4c7e-b737-057d5afda2da-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.746294 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f8150e8-0d21-4e44-bfda-32a724caf6ad-operator-scripts\") pod \"placement-10b6-account-create-update-gnpg9\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.773277 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtldx\" (UniqueName: \"kubernetes.io/projected/0f8150e8-0d21-4e44-bfda-32a724caf6ad-kube-api-access-jtldx\") pod \"placement-10b6-account-create-update-gnpg9\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.782944 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-83bd-account-create-update-xs2rb"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.784562 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.794140 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.795761 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-83bd-account-create-update-xs2rb"] Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.807116 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" event={"ID":"20c9ed62-b26d-4c7e-b737-057d5afda2da","Type":"ContainerDied","Data":"7c08bfac1be2e25c063e35a4da91a52bbae79fef8fffdf622b4cdb1f59bc41cf"} Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.807165 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c08bfac1be2e25c063e35a4da91a52bbae79fef8fffdf622b4cdb1f59bc41cf" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.807457 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0e28-account-create-update-xwvcx" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.823114 4716 generic.go:334] "Generic (PLEG): container finished" podID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerID="3feaedaf98acabce942a90f852214671fafe055ad4ec60b497d1342420742a85" exitCode=0 Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.823190 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" event={"ID":"31f45c75-847f-4adc-8bdf-3438bf5f9a4c","Type":"ContainerDied","Data":"3feaedaf98acabce942a90f852214671fafe055ad4ec60b497d1342420742a85"} Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.825735 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7fa17075-24ea-4f90-9c45-8444a101b2ce","Type":"ContainerStarted","Data":"4ecfe18f8448e4e2a7a3f0068ee4d1862f8a591d7dc702996072eef68f70c3d3"} Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.828000 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" event={"ID":"b12de1dd-ac2d-4a05-af7d-f675b007109d","Type":"ContainerDied","Data":"4f7ce0e3bff34ae97fc2171fd314fcb081a3d037aad82d8b1f96c35c623aadb6"} Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.828071 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-rchbw" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.828085 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f7ce0e3bff34ae97fc2171fd314fcb081a3d037aad82d8b1f96c35c623aadb6" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.843182 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.846968 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b12de1dd-ac2d-4a05-af7d-f675b007109d-operator-scripts\") pod \"b12de1dd-ac2d-4a05-af7d-f675b007109d\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.847081 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7rxx\" (UniqueName: \"kubernetes.io/projected/b12de1dd-ac2d-4a05-af7d-f675b007109d-kube-api-access-g7rxx\") pod \"b12de1dd-ac2d-4a05-af7d-f675b007109d\" (UID: \"b12de1dd-ac2d-4a05-af7d-f675b007109d\") " Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.847675 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69dm8\" (UniqueName: \"kubernetes.io/projected/7b147869-4532-4858-afbe-5280be85584a-kube-api-access-69dm8\") pod \"glance-db-create-wgl8z\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.847789 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b147869-4532-4858-afbe-5280be85584a-operator-scripts\") pod \"glance-db-create-wgl8z\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.848651 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b12de1dd-ac2d-4a05-af7d-f675b007109d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b12de1dd-ac2d-4a05-af7d-f675b007109d" (UID: "b12de1dd-ac2d-4a05-af7d-f675b007109d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.852464 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b12de1dd-ac2d-4a05-af7d-f675b007109d-kube-api-access-g7rxx" (OuterVolumeSpecName: "kube-api-access-g7rxx") pod "b12de1dd-ac2d-4a05-af7d-f675b007109d" (UID: "b12de1dd-ac2d-4a05-af7d-f675b007109d"). InnerVolumeSpecName "kube-api-access-g7rxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.876182 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.911359 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-zftdd" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.950510 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbj4p\" (UniqueName: \"kubernetes.io/projected/e36549f8-8725-4e36-840b-1bdaa80c2e52-kube-api-access-nbj4p\") pod \"glance-83bd-account-create-update-xs2rb\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.950681 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e36549f8-8725-4e36-840b-1bdaa80c2e52-operator-scripts\") pod \"glance-83bd-account-create-update-xs2rb\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.951913 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69dm8\" (UniqueName: \"kubernetes.io/projected/7b147869-4532-4858-afbe-5280be85584a-kube-api-access-69dm8\") pod \"glance-db-create-wgl8z\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.952121 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b147869-4532-4858-afbe-5280be85584a-operator-scripts\") pod \"glance-db-create-wgl8z\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.952276 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b12de1dd-ac2d-4a05-af7d-f675b007109d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.952289 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7rxx\" (UniqueName: \"kubernetes.io/projected/b12de1dd-ac2d-4a05-af7d-f675b007109d-kube-api-access-g7rxx\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.953184 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b147869-4532-4858-afbe-5280be85584a-operator-scripts\") pod \"glance-db-create-wgl8z\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.969580 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:29 crc kubenswrapper[4716]: I1209 15:31:29.975440 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69dm8\" (UniqueName: \"kubernetes.io/projected/7b147869-4532-4858-afbe-5280be85584a-kube-api-access-69dm8\") pod \"glance-db-create-wgl8z\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.008265 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.053850 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-ovsdbserver-nb\") pod \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.053980 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-config\") pod \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.054053 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-dns-svc\") pod \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.054103 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nswhz\" (UniqueName: \"kubernetes.io/projected/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-kube-api-access-nswhz\") pod \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\" (UID: \"31f45c75-847f-4adc-8bdf-3438bf5f9a4c\") " Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.054395 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e36549f8-8725-4e36-840b-1bdaa80c2e52-operator-scripts\") pod \"glance-83bd-account-create-update-xs2rb\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.054594 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbj4p\" (UniqueName: \"kubernetes.io/projected/e36549f8-8725-4e36-840b-1bdaa80c2e52-kube-api-access-nbj4p\") pod \"glance-83bd-account-create-update-xs2rb\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.059147 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e36549f8-8725-4e36-840b-1bdaa80c2e52-operator-scripts\") pod \"glance-83bd-account-create-update-xs2rb\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.071154 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-kube-api-access-nswhz" (OuterVolumeSpecName: "kube-api-access-nswhz") pod "31f45c75-847f-4adc-8bdf-3438bf5f9a4c" (UID: "31f45c75-847f-4adc-8bdf-3438bf5f9a4c"). InnerVolumeSpecName "kube-api-access-nswhz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.078371 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-qnztr"] Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.095679 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbj4p\" (UniqueName: \"kubernetes.io/projected/e36549f8-8725-4e36-840b-1bdaa80c2e52-kube-api-access-nbj4p\") pod \"glance-83bd-account-create-update-xs2rb\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.123371 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.162978 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nswhz\" (UniqueName: \"kubernetes.io/projected/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-kube-api-access-nswhz\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.213220 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "31f45c75-847f-4adc-8bdf-3438bf5f9a4c" (UID: "31f45c75-847f-4adc-8bdf-3438bf5f9a4c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.279575 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-config" (OuterVolumeSpecName: "config") pod "31f45c75-847f-4adc-8bdf-3438bf5f9a4c" (UID: "31f45c75-847f-4adc-8bdf-3438bf5f9a4c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.284855 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.284949 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.326457 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "31f45c75-847f-4adc-8bdf-3438bf5f9a4c" (UID: "31f45c75-847f-4adc-8bdf-3438bf5f9a4c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.386436 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f45c75-847f-4adc-8bdf-3438bf5f9a4c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.572411 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a78b-account-create-update-mhkd2"] Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.587050 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-zftdd"] Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.819836 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-10b6-account-create-update-gnpg9"] Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.840072 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.840071 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-l6zl8" event={"ID":"31f45c75-847f-4adc-8bdf-3438bf5f9a4c","Type":"ContainerDied","Data":"ef645e20eee874884d4a2dd91862c778f7ba28b1c1f979c252b63b8d5284f37e"} Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.840224 4716 scope.go:117] "RemoveContainer" containerID="3feaedaf98acabce942a90f852214671fafe055ad4ec60b497d1342420742a85" Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.842329 4716 generic.go:334] "Generic (PLEG): container finished" podID="95ca86cf-1004-4d90-8ed7-c5e8277f2f84" containerID="3fda476c8fc95d2325720e50f8dfdea3525cb3be15bd404b76844562f3c648c9" exitCode=0 Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.842354 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qnztr" event={"ID":"95ca86cf-1004-4d90-8ed7-c5e8277f2f84","Type":"ContainerDied","Data":"3fda476c8fc95d2325720e50f8dfdea3525cb3be15bd404b76844562f3c648c9"} Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.842556 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qnztr" event={"ID":"95ca86cf-1004-4d90-8ed7-c5e8277f2f84","Type":"ContainerStarted","Data":"8783df37d4a76dcbf7f140f62c957deedcb9c49a8df60ab8e2d16318a010b246"} Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.896593 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-l6zl8"] Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.909212 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-l6zl8"] Dec 09 15:31:30 crc kubenswrapper[4716]: E1209 15:31:30.910027 4716 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 09 15:31:30 crc kubenswrapper[4716]: E1209 15:31:30.910065 4716 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 09 15:31:30 crc kubenswrapper[4716]: E1209 15:31:30.910162 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift podName:552d079e-332b-46db-946b-2777875f3dc7 nodeName:}" failed. No retries permitted until 2025-12-09 15:31:46.910112866 +0000 UTC m=+1394.064856864 (durationBeforeRetry 16s). 
Dec 09 15:31:30 crc kubenswrapper[4716]: I1209 15:31:30.909541 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0"
Dec 09 15:31:30 crc kubenswrapper[4716]: W1209 15:31:30.912478 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3dcd346_c1af_49fd_91f2_4ff1d1d3329b.slice/crio-a56657d2f454e615b0f1bda6ca18ca9b4e7f1b84bed5519cbbd06cbb8e2ca777 WatchSource:0}: Error finding container a56657d2f454e615b0f1bda6ca18ca9b4e7f1b84bed5519cbbd06cbb8e2ca777: Status 404 returned error can't find the container with id a56657d2f454e615b0f1bda6ca18ca9b4e7f1b84bed5519cbbd06cbb8e2ca777
Dec 09 15:31:30 crc kubenswrapper[4716]: W1209 15:31:30.922008 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f8150e8_0d21_4e44_bfda_32a724caf6ad.slice/crio-719e9023c5c8cd7b80c17255c715ec8a9136e0d00956560814740b414b51b8c8 WatchSource:0}: Error finding container 719e9023c5c8cd7b80c17255c715ec8a9136e0d00956560814740b414b51b8c8: Status 404 returned error can't find the container with id 719e9023c5c8cd7b80c17255c715ec8a9136e0d00956560814740b414b51b8c8
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.000681 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-83bd-account-create-update-xs2rb"]
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.007006 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-wgl8z"]
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.228060 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" path="/var/lib/kubelet/pods/31f45c75-847f-4adc-8bdf-3438bf5f9a4c/volumes"
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.864388 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-10b6-account-create-update-gnpg9" event={"ID":"0f8150e8-0d21-4e44-bfda-32a724caf6ad","Type":"ContainerStarted","Data":"719e9023c5c8cd7b80c17255c715ec8a9136e0d00956560814740b414b51b8c8"}
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.867218 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a78b-account-create-update-mhkd2" event={"ID":"eed32c8a-48cc-4162-93b6-7830c131a586","Type":"ContainerStarted","Data":"82de814c6e1ad29d4f88aa8829eb3da588e3118f9451b8235b919866665f3148"}
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.868215 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zftdd" event={"ID":"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b","Type":"ContainerStarted","Data":"a56657d2f454e615b0f1bda6ca18ca9b4e7f1b84bed5519cbbd06cbb8e2ca777"}
Dec 09 15:31:31 crc kubenswrapper[4716]: I1209 15:31:31.892640 4716 scope.go:117] "RemoveContainer" containerID="aa481eeebdcda48756a0218836d53c906c8690e011581e52b182ec8a76808edc"
Dec 09 15:31:31 crc kubenswrapper[4716]: W1209 15:31:31.905962 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode36549f8_8725_4e36_840b_1bdaa80c2e52.slice/crio-e0528881a933680cfcb5d788b5a5d2a023dc3cbf862b44294fd98c7e6413d3b0 WatchSource:0}: Error finding container e0528881a933680cfcb5d788b5a5d2a023dc3cbf862b44294fd98c7e6413d3b0: Status 404 returned error can't find the container with id e0528881a933680cfcb5d788b5a5d2a023dc3cbf862b44294fd98c7e6413d3b0
Dec 09 15:31:31 crc kubenswrapper[4716]: W1209 15:31:31.907363 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b147869_4532_4858_afbe_5280be85584a.slice/crio-44ff19a753eab99ba6d5b1d1742287b2398f96eee85cbfd18337ef2ac3784127 WatchSource:0}: Error finding container 44ff19a753eab99ba6d5b1d1742287b2398f96eee85cbfd18337ef2ac3784127: Status 404 returned error can't find the container with id 44ff19a753eab99ba6d5b1d1742287b2398f96eee85cbfd18337ef2ac3784127
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.424402 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-qnztr"
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.549491 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8r9p\" (UniqueName: \"kubernetes.io/projected/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-kube-api-access-h8r9p\") pod \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") "
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.549919 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-operator-scripts\") pod \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\" (UID: \"95ca86cf-1004-4d90-8ed7-c5e8277f2f84\") "
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.550474 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "95ca86cf-1004-4d90-8ed7-c5e8277f2f84" (UID: "95ca86cf-1004-4d90-8ed7-c5e8277f2f84"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.550983 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.556942 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-kube-api-access-h8r9p" (OuterVolumeSpecName: "kube-api-access-h8r9p") pod "95ca86cf-1004-4d90-8ed7-c5e8277f2f84" (UID: "95ca86cf-1004-4d90-8ed7-c5e8277f2f84"). InnerVolumeSpecName "kube-api-access-h8r9p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.653784 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8r9p\" (UniqueName: \"kubernetes.io/projected/95ca86cf-1004-4d90-8ed7-c5e8277f2f84-kube-api-access-h8r9p\") on node \"crc\" DevicePath \"\""
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.884584 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerStarted","Data":"233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.889241 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-10b6-account-create-update-gnpg9" event={"ID":"0f8150e8-0d21-4e44-bfda-32a724caf6ad","Type":"ContainerStarted","Data":"858976ae7428005951774bd37e3d75762c1000efe5152e7e5b5133ab6b02ad31"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.894800 4716 generic.go:334] "Generic (PLEG): container finished" podID="eed32c8a-48cc-4162-93b6-7830c131a586" containerID="5ae6cd3f51234d7418529cd76d81323bd7cd2f60177378f4732707c4e95c821a" exitCode=0
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.894875 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a78b-account-create-update-mhkd2" event={"ID":"eed32c8a-48cc-4162-93b6-7830c131a586","Type":"ContainerDied","Data":"5ae6cd3f51234d7418529cd76d81323bd7cd2f60177378f4732707c4e95c821a"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.897106 4716 generic.go:334] "Generic (PLEG): container finished" podID="7b147869-4532-4858-afbe-5280be85584a" containerID="2e9050cfa87e8b69d7fb47e9180d456507eb680327214b0c7a0db3b4b458aa24" exitCode=0
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.897174 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wgl8z" event={"ID":"7b147869-4532-4858-afbe-5280be85584a","Type":"ContainerDied","Data":"2e9050cfa87e8b69d7fb47e9180d456507eb680327214b0c7a0db3b4b458aa24"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.897199 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wgl8z" event={"ID":"7b147869-4532-4858-afbe-5280be85584a","Type":"ContainerStarted","Data":"44ff19a753eab99ba6d5b1d1742287b2398f96eee85cbfd18337ef2ac3784127"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.900493 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7fa17075-24ea-4f90-9c45-8444a101b2ce","Type":"ContainerStarted","Data":"66eb385f748c8220f106086ba51715d417ca03a82ff23eb21495883fed99d9fa"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.900600 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7fa17075-24ea-4f90-9c45-8444a101b2ce","Type":"ContainerStarted","Data":"cd2f4542634280e1fc743f0c69f81c7f86e7904169f89c368799162a2fe793be"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.902123 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.904941 4716 generic.go:334] "Generic (PLEG): container finished" podID="d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" containerID="197798eebb7dc48dc6e78ccd6cb07a25c1391dd979725dc192380f71455fbda8" exitCode=0
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.905035 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zftdd" event={"ID":"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b","Type":"ContainerDied","Data":"197798eebb7dc48dc6e78ccd6cb07a25c1391dd979725dc192380f71455fbda8"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.912791 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qnztr" event={"ID":"95ca86cf-1004-4d90-8ed7-c5e8277f2f84","Type":"ContainerDied","Data":"8783df37d4a76dcbf7f140f62c957deedcb9c49a8df60ab8e2d16318a010b246"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.912834 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8783df37d4a76dcbf7f140f62c957deedcb9c49a8df60ab8e2d16318a010b246"
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.912893 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-qnztr"
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.915454 4716 generic.go:334] "Generic (PLEG): container finished" podID="e36549f8-8725-4e36-840b-1bdaa80c2e52" containerID="1634feab212b94e9f8e253b1f32db52e3800581b526ffe3408c569c83c98a038" exitCode=0
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.915526 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-83bd-account-create-update-xs2rb" event={"ID":"e36549f8-8725-4e36-840b-1bdaa80c2e52","Type":"ContainerDied","Data":"1634feab212b94e9f8e253b1f32db52e3800581b526ffe3408c569c83c98a038"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.915560 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-83bd-account-create-update-xs2rb" event={"ID":"e36549f8-8725-4e36-840b-1bdaa80c2e52","Type":"ContainerStarted","Data":"e0528881a933680cfcb5d788b5a5d2a023dc3cbf862b44294fd98c7e6413d3b0"}
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.916697 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.732713195 podStartE2EDuration="1m9.916665577s" podCreationTimestamp="2025-12-09 15:30:23 +0000 UTC" firstStartedPulling="2025-12-09 15:30:26.901943421 +0000 UTC m=+1314.056687409" lastFinishedPulling="2025-12-09 15:31:32.085895803 +0000 UTC m=+1379.240639791" observedRunningTime="2025-12-09 15:31:32.905758383 +0000 UTC m=+1380.060502371" watchObservedRunningTime="2025-12-09 15:31:32.916665577 +0000 UTC m=+1380.071409565"
Dec 09 15:31:32 crc kubenswrapper[4716]: I1209 15:31:32.977002 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.9557279899999997 podStartE2EDuration="5.976980975s" podCreationTimestamp="2025-12-09 15:31:27 +0000 UTC" firstStartedPulling="2025-12-09 15:31:28.976035704 +0000 UTC m=+1376.130779692" lastFinishedPulling="2025-12-09 15:31:31.997288689 +0000 UTC m=+1379.152032677" observedRunningTime="2025-12-09 15:31:32.975932155 +0000 UTC m=+1380.130676143" watchObservedRunningTime="2025-12-09 15:31:32.976980975 +0000 UTC m=+1380.131724963"
Dec 09 15:31:33 crc kubenswrapper[4716]: I1209 15:31:33.928597 4716 generic.go:334] "Generic (PLEG): container finished" podID="0f8150e8-0d21-4e44-bfda-32a724caf6ad" containerID="858976ae7428005951774bd37e3d75762c1000efe5152e7e5b5133ab6b02ad31" exitCode=0
Dec 09 15:31:33 crc kubenswrapper[4716]: I1209 15:31:33.928661 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-10b6-account-create-update-gnpg9" event={"ID":"0f8150e8-0d21-4e44-bfda-32a724caf6ad","Type":"ContainerDied","Data":"858976ae7428005951774bd37e3d75762c1000efe5152e7e5b5133ab6b02ad31"}
event={"ID":"0f8150e8-0d21-4e44-bfda-32a724caf6ad","Type":"ContainerDied","Data":"858976ae7428005951774bd37e3d75762c1000efe5152e7e5b5133ab6b02ad31"} Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.407326 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv"] Dec 09 15:31:34 crc kubenswrapper[4716]: E1209 15:31:34.408520 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95ca86cf-1004-4d90-8ed7-c5e8277f2f84" containerName="mariadb-database-create" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.408538 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ca86cf-1004-4d90-8ed7-c5e8277f2f84" containerName="mariadb-database-create" Dec 09 15:31:34 crc kubenswrapper[4716]: E1209 15:31:34.408561 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerName="dnsmasq-dns" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.408568 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerName="dnsmasq-dns" Dec 09 15:31:34 crc kubenswrapper[4716]: E1209 15:31:34.408582 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerName="init" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.408589 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerName="init" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.408792 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f45c75-847f-4adc-8bdf-3438bf5f9a4c" containerName="dnsmasq-dns" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.408820 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="95ca86cf-1004-4d90-8ed7-c5e8277f2f84" containerName="mariadb-database-create" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.409493 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.449031 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv"] Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.461885 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.604959 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtldx\" (UniqueName: \"kubernetes.io/projected/0f8150e8-0d21-4e44-bfda-32a724caf6ad-kube-api-access-jtldx\") pod \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.605037 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f8150e8-0d21-4e44-bfda-32a724caf6ad-operator-scripts\") pod \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\" (UID: \"0f8150e8-0d21-4e44-bfda-32a724caf6ad\") " Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.605491 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac603427-aaf7-459a-b6bf-11fd5926113b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-jwqjv\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.605576 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvj6n\" (UniqueName: \"kubernetes.io/projected/ac603427-aaf7-459a-b6bf-11fd5926113b-kube-api-access-qvj6n\") pod \"mysqld-exporter-openstack-cell1-db-create-jwqjv\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.606225 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f8150e8-0d21-4e44-bfda-32a724caf6ad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0f8150e8-0d21-4e44-bfda-32a724caf6ad" (UID: "0f8150e8-0d21-4e44-bfda-32a724caf6ad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.614850 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f8150e8-0d21-4e44-bfda-32a724caf6ad-kube-api-access-jtldx" (OuterVolumeSpecName: "kube-api-access-jtldx") pod "0f8150e8-0d21-4e44-bfda-32a724caf6ad" (UID: "0f8150e8-0d21-4e44-bfda-32a724caf6ad"). InnerVolumeSpecName "kube-api-access-jtldx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.636888 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-6090-account-create-update-pqqf4"] Dec 09 15:31:34 crc kubenswrapper[4716]: E1209 15:31:34.637543 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f8150e8-0d21-4e44-bfda-32a724caf6ad" containerName="mariadb-account-create-update" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.637559 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f8150e8-0d21-4e44-bfda-32a724caf6ad" containerName="mariadb-account-create-update" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.637826 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f8150e8-0d21-4e44-bfda-32a724caf6ad" containerName="mariadb-account-create-update" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.638575 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.641758 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.647360 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-6090-account-create-update-pqqf4"] Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.720991 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac603427-aaf7-459a-b6bf-11fd5926113b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-jwqjv\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.721098 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvj6n\" (UniqueName: \"kubernetes.io/projected/ac603427-aaf7-459a-b6bf-11fd5926113b-kube-api-access-qvj6n\") pod \"mysqld-exporter-openstack-cell1-db-create-jwqjv\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.721483 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtldx\" (UniqueName: \"kubernetes.io/projected/0f8150e8-0d21-4e44-bfda-32a724caf6ad-kube-api-access-jtldx\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.721552 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f8150e8-0d21-4e44-bfda-32a724caf6ad-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.722483 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac603427-aaf7-459a-b6bf-11fd5926113b-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-jwqjv\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.742464 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvj6n\" (UniqueName: 
\"kubernetes.io/projected/ac603427-aaf7-459a-b6bf-11fd5926113b-kube-api-access-qvj6n\") pod \"mysqld-exporter-openstack-cell1-db-create-jwqjv\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.784853 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.809307 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.816600 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.825518 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0529dfa0-1e8c-4112-af01-c03890a167cd-operator-scripts\") pod \"mysqld-exporter-6090-account-create-update-pqqf4\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.825917 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmb2v\" (UniqueName: \"kubernetes.io/projected/0529dfa0-1e8c-4112-af01-c03890a167cd-kube-api-access-gmb2v\") pod \"mysqld-exporter-6090-account-create-update-pqqf4\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.835765 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zftdd" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.849813 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.929073 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e36549f8-8725-4e36-840b-1bdaa80c2e52-operator-scripts\") pod \"e36549f8-8725-4e36-840b-1bdaa80c2e52\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.929148 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbj4p\" (UniqueName: \"kubernetes.io/projected/e36549f8-8725-4e36-840b-1bdaa80c2e52-kube-api-access-nbj4p\") pod \"e36549f8-8725-4e36-840b-1bdaa80c2e52\" (UID: \"e36549f8-8725-4e36-840b-1bdaa80c2e52\") " Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.929302 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txgfj\" (UniqueName: \"kubernetes.io/projected/eed32c8a-48cc-4162-93b6-7830c131a586-kube-api-access-txgfj\") pod \"eed32c8a-48cc-4162-93b6-7830c131a586\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.929344 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed32c8a-48cc-4162-93b6-7830c131a586-operator-scripts\") pod \"eed32c8a-48cc-4162-93b6-7830c131a586\" (UID: \"eed32c8a-48cc-4162-93b6-7830c131a586\") " Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.929760 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0529dfa0-1e8c-4112-af01-c03890a167cd-operator-scripts\") pod \"mysqld-exporter-6090-account-create-update-pqqf4\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.929862 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmb2v\" (UniqueName: \"kubernetes.io/projected/0529dfa0-1e8c-4112-af01-c03890a167cd-kube-api-access-gmb2v\") pod \"mysqld-exporter-6090-account-create-update-pqqf4\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.931274 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eed32c8a-48cc-4162-93b6-7830c131a586-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eed32c8a-48cc-4162-93b6-7830c131a586" (UID: "eed32c8a-48cc-4162-93b6-7830c131a586"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.931308 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e36549f8-8725-4e36-840b-1bdaa80c2e52-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e36549f8-8725-4e36-840b-1bdaa80c2e52" (UID: "e36549f8-8725-4e36-840b-1bdaa80c2e52"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.933432 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0529dfa0-1e8c-4112-af01-c03890a167cd-operator-scripts\") pod \"mysqld-exporter-6090-account-create-update-pqqf4\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.935315 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e36549f8-8725-4e36-840b-1bdaa80c2e52-kube-api-access-nbj4p" (OuterVolumeSpecName: "kube-api-access-nbj4p") pod "e36549f8-8725-4e36-840b-1bdaa80c2e52" (UID: "e36549f8-8725-4e36-840b-1bdaa80c2e52"). InnerVolumeSpecName "kube-api-access-nbj4p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.935803 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eed32c8a-48cc-4162-93b6-7830c131a586-kube-api-access-txgfj" (OuterVolumeSpecName: "kube-api-access-txgfj") pod "eed32c8a-48cc-4162-93b6-7830c131a586" (UID: "eed32c8a-48cc-4162-93b6-7830c131a586"). InnerVolumeSpecName "kube-api-access-txgfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.948324 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-zftdd" event={"ID":"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b","Type":"ContainerDied","Data":"a56657d2f454e615b0f1bda6ca18ca9b4e7f1b84bed5519cbbd06cbb8e2ca777"} Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.948378 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a56657d2f454e615b0f1bda6ca18ca9b4e7f1b84bed5519cbbd06cbb8e2ca777" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.948444 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-zftdd" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.951594 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-wgl8z" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.951579 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-wgl8z" event={"ID":"7b147869-4532-4858-afbe-5280be85584a","Type":"ContainerDied","Data":"44ff19a753eab99ba6d5b1d1742287b2398f96eee85cbfd18337ef2ac3784127"} Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.951780 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44ff19a753eab99ba6d5b1d1742287b2398f96eee85cbfd18337ef2ac3784127" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.953849 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-83bd-account-create-update-xs2rb" event={"ID":"e36549f8-8725-4e36-840b-1bdaa80c2e52","Type":"ContainerDied","Data":"e0528881a933680cfcb5d788b5a5d2a023dc3cbf862b44294fd98c7e6413d3b0"} Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.953874 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0528881a933680cfcb5d788b5a5d2a023dc3cbf862b44294fd98c7e6413d3b0" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.953927 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-83bd-account-create-update-xs2rb" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.956168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-10b6-account-create-update-gnpg9" event={"ID":"0f8150e8-0d21-4e44-bfda-32a724caf6ad","Type":"ContainerDied","Data":"719e9023c5c8cd7b80c17255c715ec8a9136e0d00956560814740b414b51b8c8"} Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.956191 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="719e9023c5c8cd7b80c17255c715ec8a9136e0d00956560814740b414b51b8c8" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.956239 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-10b6-account-create-update-gnpg9" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.962693 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a78b-account-create-update-mhkd2" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.966248 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a78b-account-create-update-mhkd2" event={"ID":"eed32c8a-48cc-4162-93b6-7830c131a586","Type":"ContainerDied","Data":"82de814c6e1ad29d4f88aa8829eb3da588e3118f9451b8235b919866665f3148"} Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.966301 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82de814c6e1ad29d4f88aa8829eb3da588e3118f9451b8235b919866665f3148" Dec 09 15:31:34 crc kubenswrapper[4716]: I1209 15:31:34.969547 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmb2v\" (UniqueName: \"kubernetes.io/projected/0529dfa0-1e8c-4112-af01-c03890a167cd-kube-api-access-gmb2v\") pod \"mysqld-exporter-6090-account-create-update-pqqf4\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.030958 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b147869-4532-4858-afbe-5280be85584a-operator-scripts\") pod \"7b147869-4532-4858-afbe-5280be85584a\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.031363 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65qkz\" (UniqueName: \"kubernetes.io/projected/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-kube-api-access-65qkz\") pod \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.031517 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69dm8\" (UniqueName: \"kubernetes.io/projected/7b147869-4532-4858-afbe-5280be85584a-kube-api-access-69dm8\") pod \"7b147869-4532-4858-afbe-5280be85584a\" (UID: \"7b147869-4532-4858-afbe-5280be85584a\") " Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.031567 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-operator-scripts\") pod \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\" (UID: \"d3dcd346-c1af-49fd-91f2-4ff1d1d3329b\") " Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.032243 4716 
reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eed32c8a-48cc-4162-93b6-7830c131a586-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.032263 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e36549f8-8725-4e36-840b-1bdaa80c2e52-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.032272 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbj4p\" (UniqueName: \"kubernetes.io/projected/e36549f8-8725-4e36-840b-1bdaa80c2e52-kube-api-access-nbj4p\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.032283 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txgfj\" (UniqueName: \"kubernetes.io/projected/eed32c8a-48cc-4162-93b6-7830c131a586-kube-api-access-txgfj\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.033171 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" (UID: "d3dcd346-c1af-49fd-91f2-4ff1d1d3329b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.034118 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b147869-4532-4858-afbe-5280be85584a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7b147869-4532-4858-afbe-5280be85584a" (UID: "7b147869-4532-4858-afbe-5280be85584a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.036582 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b147869-4532-4858-afbe-5280be85584a-kube-api-access-69dm8" (OuterVolumeSpecName: "kube-api-access-69dm8") pod "7b147869-4532-4858-afbe-5280be85584a" (UID: "7b147869-4532-4858-afbe-5280be85584a"). InnerVolumeSpecName "kube-api-access-69dm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.036557 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-kube-api-access-65qkz" (OuterVolumeSpecName: "kube-api-access-65qkz") pod "d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" (UID: "d3dcd346-c1af-49fd-91f2-4ff1d1d3329b"). InnerVolumeSpecName "kube-api-access-65qkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.104524 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.134569 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7b147869-4532-4858-afbe-5280be85584a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.134600 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65qkz\" (UniqueName: \"kubernetes.io/projected/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-kube-api-access-65qkz\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.134610 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69dm8\" (UniqueName: \"kubernetes.io/projected/7b147869-4532-4858-afbe-5280be85584a-kube-api-access-69dm8\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.134636 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.345360 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv"] Dec 09 15:31:35 crc kubenswrapper[4716]: W1209 15:31:35.356212 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac603427_aaf7_459a_b6bf_11fd5926113b.slice/crio-0ae570733d140acdf0ba3e1fc67effc882a2ea35d2a95d81521b5e212a2061d9 WatchSource:0}: Error finding container 0ae570733d140acdf0ba3e1fc67effc882a2ea35d2a95d81521b5e212a2061d9: Status 404 returned error can't find the container with id 0ae570733d140acdf0ba3e1fc67effc882a2ea35d2a95d81521b5e212a2061d9 Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.574565 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-6090-account-create-update-pqqf4"] Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.613063 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.975779 4716 generic.go:334] "Generic (PLEG): container finished" podID="5b9ed832-324f-4995-8277-6e979cfc7bc1" containerID="1fce9b9f0b4c94a5b3fecc856db4eb48e4c6d27005b576d452b4f50f371a97b9" exitCode=0 Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.975895 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hkncs" event={"ID":"5b9ed832-324f-4995-8277-6e979cfc7bc1","Type":"ContainerDied","Data":"1fce9b9f0b4c94a5b3fecc856db4eb48e4c6d27005b576d452b4f50f371a97b9"} Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.978406 4716 generic.go:334] "Generic (PLEG): container finished" podID="ac603427-aaf7-459a-b6bf-11fd5926113b" containerID="1bed3efe42d3cd6befc635ecdeadeb81c22f11f9b841434238e434fe8690e559" exitCode=0 Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.978482 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" event={"ID":"ac603427-aaf7-459a-b6bf-11fd5926113b","Type":"ContainerDied","Data":"1bed3efe42d3cd6befc635ecdeadeb81c22f11f9b841434238e434fe8690e559"} Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.978525 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" event={"ID":"ac603427-aaf7-459a-b6bf-11fd5926113b","Type":"ContainerStarted","Data":"0ae570733d140acdf0ba3e1fc67effc882a2ea35d2a95d81521b5e212a2061d9"} Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.981041 4716 generic.go:334] "Generic (PLEG): container finished" podID="0529dfa0-1e8c-4112-af01-c03890a167cd" containerID="c8ab4fb3eb7c43e6529b4c02e78ed1d12b73bf9bc71bc42ea8bd35357a1d4e87" exitCode=0 Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.981086 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" event={"ID":"0529dfa0-1e8c-4112-af01-c03890a167cd","Type":"ContainerDied","Data":"c8ab4fb3eb7c43e6529b4c02e78ed1d12b73bf9bc71bc42ea8bd35357a1d4e87"} Dec 09 15:31:35 crc kubenswrapper[4716]: I1209 15:31:35.981116 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" event={"ID":"0529dfa0-1e8c-4112-af01-c03890a167cd","Type":"ContainerStarted","Data":"f74409ee77c8147b2660427a18f308a8394356a8238053ab00cc9f7031431734"} Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.461227 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.463372 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-pf2c7" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.686985 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8mm7h-config-zl4cz"] Dec 09 15:31:36 crc kubenswrapper[4716]: E1209 15:31:36.687417 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eed32c8a-48cc-4162-93b6-7830c131a586" containerName="mariadb-account-create-update" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687434 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="eed32c8a-48cc-4162-93b6-7830c131a586" containerName="mariadb-account-create-update" Dec 09 15:31:36 crc kubenswrapper[4716]: E1209 15:31:36.687443 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" containerName="mariadb-database-create" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687450 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" containerName="mariadb-database-create" Dec 09 15:31:36 crc kubenswrapper[4716]: E1209 15:31:36.687475 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b147869-4532-4858-afbe-5280be85584a" containerName="mariadb-database-create" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687481 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b147869-4532-4858-afbe-5280be85584a" containerName="mariadb-database-create" Dec 09 15:31:36 crc kubenswrapper[4716]: E1209 15:31:36.687512 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e36549f8-8725-4e36-840b-1bdaa80c2e52" containerName="mariadb-account-create-update" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687517 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e36549f8-8725-4e36-840b-1bdaa80c2e52" containerName="mariadb-account-create-update" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687754 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b147869-4532-4858-afbe-5280be85584a" containerName="mariadb-database-create" Dec 09 
15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687782 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="eed32c8a-48cc-4162-93b6-7830c131a586" containerName="mariadb-account-create-update" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687796 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" containerName="mariadb-database-create" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.687815 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e36549f8-8725-4e36-840b-1bdaa80c2e52" containerName="mariadb-account-create-update" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.689342 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.691870 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.700609 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8mm7h-config-zl4cz"] Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.878550 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2cjl\" (UniqueName: \"kubernetes.io/projected/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-kube-api-access-c2cjl\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.878637 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-additional-scripts\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.878674 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-scripts\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.878713 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run-ovn\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.878751 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-log-ovn\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.878784 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run\") pod 
\"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.980271 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-additional-scripts\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.980333 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-scripts\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.980364 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run-ovn\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.980390 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-log-ovn\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.980413 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.980564 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2cjl\" (UniqueName: \"kubernetes.io/projected/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-kube-api-access-c2cjl\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.981585 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-log-ovn\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.981714 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.981585 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run-ovn\") pod 
\"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.981738 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-additional-scripts\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:36 crc kubenswrapper[4716]: I1209 15:31:36.983514 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-scripts\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.014394 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2cjl\" (UniqueName: \"kubernetes.io/projected/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-kube-api-access-c2cjl\") pod \"ovn-controller-8mm7h-config-zl4cz\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.019821 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.500039 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.589153 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.597299 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.669120 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8mm7h-config-zl4cz"] Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.692704 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0529dfa0-1e8c-4112-af01-c03890a167cd-operator-scripts\") pod \"0529dfa0-1e8c-4112-af01-c03890a167cd\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.692786 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-scripts\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.692817 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tx6dx\" (UniqueName: \"kubernetes.io/projected/5b9ed832-324f-4995-8277-6e979cfc7bc1-kube-api-access-tx6dx\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.692858 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-ring-data-devices\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.692979 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5b9ed832-324f-4995-8277-6e979cfc7bc1-etc-swift\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693004 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-combined-ca-bundle\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693090 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-swiftconf\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693138 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-dispersionconf\") pod \"5b9ed832-324f-4995-8277-6e979cfc7bc1\" (UID: \"5b9ed832-324f-4995-8277-6e979cfc7bc1\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693159 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac603427-aaf7-459a-b6bf-11fd5926113b-operator-scripts\") pod \"ac603427-aaf7-459a-b6bf-11fd5926113b\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693208 4716 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-gmb2v\" (UniqueName: \"kubernetes.io/projected/0529dfa0-1e8c-4112-af01-c03890a167cd-kube-api-access-gmb2v\") pod \"0529dfa0-1e8c-4112-af01-c03890a167cd\" (UID: \"0529dfa0-1e8c-4112-af01-c03890a167cd\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693228 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvj6n\" (UniqueName: \"kubernetes.io/projected/ac603427-aaf7-459a-b6bf-11fd5926113b-kube-api-access-qvj6n\") pod \"ac603427-aaf7-459a-b6bf-11fd5926113b\" (UID: \"ac603427-aaf7-459a-b6bf-11fd5926113b\") " Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.693523 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0529dfa0-1e8c-4112-af01-c03890a167cd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0529dfa0-1e8c-4112-af01-c03890a167cd" (UID: "0529dfa0-1e8c-4112-af01-c03890a167cd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.694152 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.694599 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac603427-aaf7-459a-b6bf-11fd5926113b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ac603427-aaf7-459a-b6bf-11fd5926113b" (UID: "ac603427-aaf7-459a-b6bf-11fd5926113b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.695137 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b9ed832-324f-4995-8277-6e979cfc7bc1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.695341 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac603427-aaf7-459a-b6bf-11fd5926113b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.695435 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0529dfa0-1e8c-4112-af01-c03890a167cd-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.695514 4716 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.698185 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac603427-aaf7-459a-b6bf-11fd5926113b-kube-api-access-qvj6n" (OuterVolumeSpecName: "kube-api-access-qvj6n") pod "ac603427-aaf7-459a-b6bf-11fd5926113b" (UID: "ac603427-aaf7-459a-b6bf-11fd5926113b"). InnerVolumeSpecName "kube-api-access-qvj6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.698652 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0529dfa0-1e8c-4112-af01-c03890a167cd-kube-api-access-gmb2v" (OuterVolumeSpecName: "kube-api-access-gmb2v") pod "0529dfa0-1e8c-4112-af01-c03890a167cd" (UID: "0529dfa0-1e8c-4112-af01-c03890a167cd"). InnerVolumeSpecName "kube-api-access-gmb2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.698672 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b9ed832-324f-4995-8277-6e979cfc7bc1-kube-api-access-tx6dx" (OuterVolumeSpecName: "kube-api-access-tx6dx") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "kube-api-access-tx6dx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.701273 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.720599 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-scripts" (OuterVolumeSpecName: "scripts") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.721495 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.728100 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b9ed832-324f-4995-8277-6e979cfc7bc1" (UID: "5b9ed832-324f-4995-8277-6e979cfc7bc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.797919 4716 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.797959 4716 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.797977 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmb2v\" (UniqueName: \"kubernetes.io/projected/0529dfa0-1e8c-4112-af01-c03890a167cd-kube-api-access-gmb2v\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.797993 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvj6n\" (UniqueName: \"kubernetes.io/projected/ac603427-aaf7-459a-b6bf-11fd5926113b-kube-api-access-qvj6n\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.798007 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b9ed832-324f-4995-8277-6e979cfc7bc1-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.798040 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tx6dx\" (UniqueName: \"kubernetes.io/projected/5b9ed832-324f-4995-8277-6e979cfc7bc1-kube-api-access-tx6dx\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.798052 4716 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5b9ed832-324f-4995-8277-6e979cfc7bc1-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.798063 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b9ed832-324f-4995-8277-6e979cfc7bc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.999103 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" event={"ID":"0529dfa0-1e8c-4112-af01-c03890a167cd","Type":"ContainerDied","Data":"f74409ee77c8147b2660427a18f308a8394356a8238053ab00cc9f7031431734"} Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.999460 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f74409ee77c8147b2660427a18f308a8394356a8238053ab00cc9f7031431734" Dec 09 15:31:37 crc kubenswrapper[4716]: I1209 15:31:37.999144 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-6090-account-create-update-pqqf4" Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.004100 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8mm7h-config-zl4cz" event={"ID":"c31e1b1c-1e1c-44ed-9235-1204cb2736b6","Type":"ContainerStarted","Data":"0460309a5095d240c5808831d399e9fd710461684fa8878a37d5bf1fb7faaf34"} Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.004144 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8mm7h-config-zl4cz" event={"ID":"c31e1b1c-1e1c-44ed-9235-1204cb2736b6","Type":"ContainerStarted","Data":"00be117262fef7776d1d2f6fc519407c26334a0fcae0a23d571879ca81df08fc"} Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.005949 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hkncs" event={"ID":"5b9ed832-324f-4995-8277-6e979cfc7bc1","Type":"ContainerDied","Data":"c7d2040cf1da931d9c97382985334476be045d920ec8005b7119d866487b01a0"} Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.005995 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7d2040cf1da931d9c97382985334476be045d920ec8005b7119d866487b01a0" Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.006078 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-hkncs" Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.011503 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" event={"ID":"ac603427-aaf7-459a-b6bf-11fd5926113b","Type":"ContainerDied","Data":"0ae570733d140acdf0ba3e1fc67effc882a2ea35d2a95d81521b5e212a2061d9"} Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.011551 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ae570733d140acdf0ba3e1fc67effc882a2ea35d2a95d81521b5e212a2061d9" Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.011571 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv" Dec 09 15:31:38 crc kubenswrapper[4716]: I1209 15:31:38.021645 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8mm7h-config-zl4cz" podStartSLOduration=2.021598218 podStartE2EDuration="2.021598218s" podCreationTimestamp="2025-12-09 15:31:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:31:38.019852468 +0000 UTC m=+1385.174596456" watchObservedRunningTime="2025-12-09 15:31:38.021598218 +0000 UTC m=+1385.176342206" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.021382 4716 generic.go:334] "Generic (PLEG): container finished" podID="c31e1b1c-1e1c-44ed-9235-1204cb2736b6" containerID="0460309a5095d240c5808831d399e9fd710461684fa8878a37d5bf1fb7faaf34" exitCode=0 Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.021683 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8mm7h-config-zl4cz" event={"ID":"c31e1b1c-1e1c-44ed-9235-1204cb2736b6","Type":"ContainerDied","Data":"0460309a5095d240c5808831d399e9fd710461684fa8878a37d5bf1fb7faaf34"} Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.946949 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:31:39 crc kubenswrapper[4716]: E1209 15:31:39.947404 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac603427-aaf7-459a-b6bf-11fd5926113b" containerName="mariadb-database-create" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.947425 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac603427-aaf7-459a-b6bf-11fd5926113b" containerName="mariadb-database-create" Dec 09 15:31:39 crc kubenswrapper[4716]: E1209 15:31:39.947448 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9ed832-324f-4995-8277-6e979cfc7bc1" containerName="swift-ring-rebalance" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.947455 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9ed832-324f-4995-8277-6e979cfc7bc1" containerName="swift-ring-rebalance" Dec 09 15:31:39 crc kubenswrapper[4716]: E1209 15:31:39.947475 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0529dfa0-1e8c-4112-af01-c03890a167cd" containerName="mariadb-account-create-update" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.947483 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="0529dfa0-1e8c-4112-af01-c03890a167cd" containerName="mariadb-account-create-update" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.948001 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9ed832-324f-4995-8277-6e979cfc7bc1" containerName="swift-ring-rebalance" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.948045 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac603427-aaf7-459a-b6bf-11fd5926113b" containerName="mariadb-database-create" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.948059 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="0529dfa0-1e8c-4112-af01-c03890a167cd" containerName="mariadb-account-create-update" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.948938 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.951823 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 09 15:31:39 crc kubenswrapper[4716]: I1209 15:31:39.958982 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.143470 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-config-data\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.143904 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdrcn\" (UniqueName: \"kubernetes.io/projected/54db648f-e02a-4af0-b425-458a51844527-kube-api-access-sdrcn\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.143922 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-zf7ll"] Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.143983 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.146215 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.150334 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.151469 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-df9ww" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.168711 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-zf7ll"] Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.247190 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdrcn\" (UniqueName: \"kubernetes.io/projected/54db648f-e02a-4af0-b425-458a51844527-kube-api-access-sdrcn\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.247379 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.247447 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jnfb\" (UniqueName: \"kubernetes.io/projected/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-kube-api-access-6jnfb\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.247515 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-combined-ca-bundle\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.247578 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-config-data\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.248710 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-config-data\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.248864 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-db-sync-config-data\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.261308 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: 
\"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.271790 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-config-data\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.271823 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdrcn\" (UniqueName: \"kubernetes.io/projected/54db648f-e02a-4af0-b425-458a51844527-kube-api-access-sdrcn\") pod \"mysqld-exporter-0\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.351634 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jnfb\" (UniqueName: \"kubernetes.io/projected/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-kube-api-access-6jnfb\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.351707 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-combined-ca-bundle\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.351775 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-config-data\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.351834 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-db-sync-config-data\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.356459 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-config-data\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.357082 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-combined-ca-bundle\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.358008 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-db-sync-config-data\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.371264 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6jnfb\" (UniqueName: \"kubernetes.io/projected/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-kube-api-access-6jnfb\") pod \"glance-db-sync-zf7ll\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.431693 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.499528 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-zf7ll" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.554725 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-additional-scripts\") pod \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.554938 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-scripts\") pod \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.554975 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run-ovn\") pod \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.555066 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run\") pod \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.555102 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2cjl\" (UniqueName: \"kubernetes.io/projected/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-kube-api-access-c2cjl\") pod \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.555123 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c31e1b1c-1e1c-44ed-9235-1204cb2736b6" (UID: "c31e1b1c-1e1c-44ed-9235-1204cb2736b6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.555166 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run" (OuterVolumeSpecName: "var-run") pod "c31e1b1c-1e1c-44ed-9235-1204cb2736b6" (UID: "c31e1b1c-1e1c-44ed-9235-1204cb2736b6"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.555203 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-log-ovn\") pod \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\" (UID: \"c31e1b1c-1e1c-44ed-9235-1204cb2736b6\") " Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.555572 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c31e1b1c-1e1c-44ed-9235-1204cb2736b6" (UID: "c31e1b1c-1e1c-44ed-9235-1204cb2736b6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.556228 4716 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.556258 4716 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.556271 4716 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-var-run\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.556809 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-scripts" (OuterVolumeSpecName: "scripts") pod "c31e1b1c-1e1c-44ed-9235-1204cb2736b6" (UID: "c31e1b1c-1e1c-44ed-9235-1204cb2736b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.558065 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c31e1b1c-1e1c-44ed-9235-1204cb2736b6" (UID: "c31e1b1c-1e1c-44ed-9235-1204cb2736b6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.565875 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-kube-api-access-c2cjl" (OuterVolumeSpecName: "kube-api-access-c2cjl") pod "c31e1b1c-1e1c-44ed-9235-1204cb2736b6" (UID: "c31e1b1c-1e1c-44ed-9235-1204cb2736b6"). InnerVolumeSpecName "kube-api-access-c2cjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.566484 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.619727 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.634589 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.660110 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2cjl\" (UniqueName: \"kubernetes.io/projected/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-kube-api-access-c2cjl\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.660175 4716 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:40 crc kubenswrapper[4716]: I1209 15:31:40.660188 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c31e1b1c-1e1c-44ed-9235-1204cb2736b6-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.041940 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8mm7h-config-zl4cz" event={"ID":"c31e1b1c-1e1c-44ed-9235-1204cb2736b6","Type":"ContainerDied","Data":"00be117262fef7776d1d2f6fc519407c26334a0fcae0a23d571879ca81df08fc"} Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.042238 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00be117262fef7776d1d2f6fc519407c26334a0fcae0a23d571879ca81df08fc" Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.041966 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8mm7h-config-zl4cz" Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.043833 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.133181 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.166598 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-zf7ll"] Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.183643 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8mm7h-config-zl4cz"] Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.194740 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8mm7h-config-zl4cz"] Dec 09 15:31:41 crc kubenswrapper[4716]: I1209 15:31:41.226692 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c31e1b1c-1e1c-44ed-9235-1204cb2736b6" path="/var/lib/kubelet/pods/c31e1b1c-1e1c-44ed-9235-1204cb2736b6/volumes" Dec 09 15:31:42 crc kubenswrapper[4716]: I1209 15:31:42.052200 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"54db648f-e02a-4af0-b425-458a51844527","Type":"ContainerStarted","Data":"2b53ca95af9149a9f357a5a3671500ec6fcf423490f121045121a7dc2cae4b84"} Dec 09 15:31:42 crc kubenswrapper[4716]: I1209 15:31:42.054497 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zf7ll" event={"ID":"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f","Type":"ContainerStarted","Data":"b2f74ff7785b2e59ce11f5d6f3997301f6e0b1e06490de875caf22b8308f7ff3"} Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.066011 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"54db648f-e02a-4af0-b425-458a51844527","Type":"ContainerStarted","Data":"7b10665d8101b492b30036beb3454a3d4ff99888786d6ffd4bb6a587a8d7d584"} Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.237959 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.666849049 podStartE2EDuration="4.237939279s" podCreationTimestamp="2025-12-09 15:31:39 +0000 UTC" firstStartedPulling="2025-12-09 15:31:41.145006948 +0000 UTC m=+1388.299750936" lastFinishedPulling="2025-12-09 15:31:42.716097178 +0000 UTC m=+1389.870841166" observedRunningTime="2025-12-09 15:31:43.093783924 +0000 UTC m=+1390.248527912" watchObservedRunningTime="2025-12-09 15:31:43.237939279 +0000 UTC m=+1390.392683267" Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.435076 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.652729 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.653458 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="prometheus" containerID="cri-o://3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76" gracePeriod=600 Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.653928 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" 
podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="thanos-sidecar" containerID="cri-o://233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525" gracePeriod=600 Dec 09 15:31:43 crc kubenswrapper[4716]: I1209 15:31:43.654070 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="config-reloader" containerID="cri-o://73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c" gracePeriod=600 Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.084120 4716 generic.go:334] "Generic (PLEG): container finished" podID="2e140762-44f7-46f9-9bbe-a8f780186869" containerID="30392210ad09dadc4c995e968b32b6d395c64e8fffbf6808673e45e67027cea0" exitCode=0 Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.084213 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2e140762-44f7-46f9-9bbe-a8f780186869","Type":"ContainerDied","Data":"30392210ad09dadc4c995e968b32b6d395c64e8fffbf6808673e45e67027cea0"} Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.090198 4716 generic.go:334] "Generic (PLEG): container finished" podID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerID="233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525" exitCode=0 Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.090238 4716 generic.go:334] "Generic (PLEG): container finished" podID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerID="3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76" exitCode=0 Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.090254 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerDied","Data":"233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525"} Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.090335 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerDied","Data":"3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76"} Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.092685 4716 generic.go:334] "Generic (PLEG): container finished" podID="f417726f-0022-42f5-bfe8-79f6605d557c" containerID="ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c" exitCode=0 Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.092769 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f417726f-0022-42f5-bfe8-79f6605d557c","Type":"ContainerDied","Data":"ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c"} Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.607114 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.768786 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fhcb\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-kube-api-access-6fhcb\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.768857 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-config\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.768922 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.768976 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/be971cf7-04bf-4487-a95a-64bb6ea739a7-config-out\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.769012 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-tls-assets\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.769076 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-web-config\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.769103 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-thanos-prometheus-http-client-file\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.769133 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/be971cf7-04bf-4487-a95a-64bb6ea739a7-prometheus-metric-storage-rulefiles-0\") pod \"be971cf7-04bf-4487-a95a-64bb6ea739a7\" (UID: \"be971cf7-04bf-4487-a95a-64bb6ea739a7\") " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.770455 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be971cf7-04bf-4487-a95a-64bb6ea739a7-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.776189 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-kube-api-access-6fhcb" (OuterVolumeSpecName: "kube-api-access-6fhcb") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "kube-api-access-6fhcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.801501 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-config" (OuterVolumeSpecName: "config") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.801746 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.802266 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.806212 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be971cf7-04bf-4487-a95a-64bb6ea739a7-config-out" (OuterVolumeSpecName: "config-out") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.807179 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-web-config" (OuterVolumeSpecName: "web-config") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.821976 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "be971cf7-04bf-4487-a95a-64bb6ea739a7" (UID: "be971cf7-04bf-4487-a95a-64bb6ea739a7"). InnerVolumeSpecName "thanos-prometheus-http-client-file". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874435 4716 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/be971cf7-04bf-4487-a95a-64bb6ea739a7-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874486 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fhcb\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-kube-api-access-6fhcb\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874497 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874521 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874532 4716 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/be971cf7-04bf-4487-a95a-64bb6ea739a7-config-out\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874541 4716 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/be971cf7-04bf-4487-a95a-64bb6ea739a7-tls-assets\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874550 4716 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-web-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.874560 4716 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/be971cf7-04bf-4487-a95a-64bb6ea739a7-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.923191 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Dec 09 15:31:44 crc kubenswrapper[4716]: I1209 15:31:44.977011 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.107354 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2e140762-44f7-46f9-9bbe-a8f780186869","Type":"ContainerStarted","Data":"7696f50534aecaa2dbb9aaa696207604da35af9cd5ff4df8ef5c7fbb7c79ce7d"} Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.108884 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.115314 4716 generic.go:334] "Generic (PLEG): container finished" podID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerID="73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c" exitCode=0 Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.115394 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerDied","Data":"73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c"} Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.115429 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"be971cf7-04bf-4487-a95a-64bb6ea739a7","Type":"ContainerDied","Data":"06fc611ec5adb9a1f5d6cd266b417bef2da5cba51aed5cd3b1cf260c843ce958"} Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.115467 4716 scope.go:117] "RemoveContainer" containerID="233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.115722 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.131435 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f417726f-0022-42f5-bfe8-79f6605d557c","Type":"ContainerStarted","Data":"f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f"} Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.132639 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.141744 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=41.819432399 podStartE2EDuration="1m47.141720259s" podCreationTimestamp="2025-12-09 15:29:58 +0000 UTC" firstStartedPulling="2025-12-09 15:30:04.389428912 +0000 UTC m=+1291.544172900" lastFinishedPulling="2025-12-09 15:31:09.711716772 +0000 UTC m=+1356.866460760" observedRunningTime="2025-12-09 15:31:45.140151013 +0000 UTC m=+1392.294895001" watchObservedRunningTime="2025-12-09 15:31:45.141720259 +0000 UTC m=+1392.296464247" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.174463 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.176858 4716 scope.go:117] "RemoveContainer" containerID="73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.186908 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.220452 4716 scope.go:117] "RemoveContainer" containerID="3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.225798 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=41.831496752 podStartE2EDuration="1m46.225773771s" podCreationTimestamp="2025-12-09 15:29:59 +0000 UTC" firstStartedPulling="2025-12-09 15:30:05.316486466 +0000 UTC m=+1292.471230454" lastFinishedPulling="2025-12-09 15:31:09.710763485 +0000 UTC m=+1356.865507473" observedRunningTime="2025-12-09 15:31:45.207418062 +0000 UTC m=+1392.362162060" watchObservedRunningTime="2025-12-09 15:31:45.225773771 +0000 UTC m=+1392.380517769" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.235985 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" path="/var/lib/kubelet/pods/be971cf7-04bf-4487-a95a-64bb6ea739a7/volumes" Dec 09 15:31:45 crc 
kubenswrapper[4716]: I1209 15:31:45.237119 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.237581 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="init-config-reloader" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.237606 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="init-config-reloader" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.238363 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="config-reloader" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238402 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="config-reloader" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.238425 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="thanos-sidecar" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238434 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="thanos-sidecar" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.238447 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="prometheus" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238453 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="prometheus" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.238477 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c31e1b1c-1e1c-44ed-9235-1204cb2736b6" containerName="ovn-config" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238484 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c31e1b1c-1e1c-44ed-9235-1204cb2736b6" containerName="ovn-config" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238788 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="config-reloader" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238835 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="prometheus" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238848 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="be971cf7-04bf-4487-a95a-64bb6ea739a7" containerName="thanos-sidecar" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.238860 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c31e1b1c-1e1c-44ed-9235-1204cb2736b6" containerName="ovn-config" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.241325 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.244344 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.244784 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-d2rpr" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.244948 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.245309 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.246082 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.246661 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.254393 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.259387 4716 scope.go:117] "RemoveContainer" containerID="495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.279287 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.299783 4716 scope.go:117] "RemoveContainer" containerID="233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.300377 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525\": container with ID starting with 233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525 not found: ID does not exist" containerID="233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.300405 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525"} err="failed to get container status \"233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525\": rpc error: code = NotFound desc = could not find container \"233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525\": container with ID starting with 233616a3e553356b3b94b254ac098b7f41f0c48aae3f0b388d21c65c8e9fd525 not found: ID does not exist" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.300427 4716 scope.go:117] "RemoveContainer" containerID="73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.300735 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c\": container with ID starting with 73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c not found: ID does not exist" 
containerID="73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.300757 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c"} err="failed to get container status \"73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c\": rpc error: code = NotFound desc = could not find container \"73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c\": container with ID starting with 73878a8e6e6c560039ba66167d3b145a0ba1c67e7b7cc4295d8381ea0c4b041c not found: ID does not exist" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.300769 4716 scope.go:117] "RemoveContainer" containerID="3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.302414 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76\": container with ID starting with 3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76 not found: ID does not exist" containerID="3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.302466 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76"} err="failed to get container status \"3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76\": rpc error: code = NotFound desc = could not find container \"3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76\": container with ID starting with 3eef08a816bc1eafd7fa2e0b1f2a20ca53072de5f719ca6acf909ffe27ba4b76 not found: ID does not exist" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.302501 4716 scope.go:117] "RemoveContainer" containerID="495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e" Dec 09 15:31:45 crc kubenswrapper[4716]: E1209 15:31:45.302981 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e\": container with ID starting with 495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e not found: ID does not exist" containerID="495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.303018 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e"} err="failed to get container status \"495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e\": rpc error: code = NotFound desc = could not find container \"495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e\": container with ID starting with 495261baab8203428ad1e59ee3fec94e586c67c04630e4401708f91e3200401e not found: ID does not exist" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.386900 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" 
(UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387218 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387256 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387320 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4076ed92-7690-4a97-b3d6-53a64842c96e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387353 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387408 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387486 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4076ed92-7690-4a97-b3d6-53a64842c96e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387592 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-config\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387617 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387680 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4076ed92-7690-4a97-b3d6-53a64842c96e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.387704 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv5jr\" (UniqueName: \"kubernetes.io/projected/4076ed92-7690-4a97-b3d6-53a64842c96e-kube-api-access-pv5jr\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489589 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489697 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489734 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489796 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4076ed92-7690-4a97-b3d6-53a64842c96e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489830 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489884 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.489953 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: 
\"kubernetes.io/empty-dir/4076ed92-7690-4a97-b3d6-53a64842c96e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.490036 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-config\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.490091 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.490128 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4076ed92-7690-4a97-b3d6-53a64842c96e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.490145 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv5jr\" (UniqueName: \"kubernetes.io/projected/4076ed92-7690-4a97-b3d6-53a64842c96e-kube-api-access-pv5jr\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.491257 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.492123 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4076ed92-7690-4a97-b3d6-53a64842c96e-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.495892 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.496480 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4076ed92-7690-4a97-b3d6-53a64842c96e-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.497242 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: 
\"kubernetes.io/empty-dir/4076ed92-7690-4a97-b3d6-53a64842c96e-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.497898 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.498124 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.499941 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.507332 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.508076 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4076ed92-7690-4a97-b3d6-53a64842c96e-config\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.512726 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv5jr\" (UniqueName: \"kubernetes.io/projected/4076ed92-7690-4a97-b3d6-53a64842c96e-kube-api-access-pv5jr\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.544833 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"4076ed92-7690-4a97-b3d6-53a64842c96e\") " pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:45 crc kubenswrapper[4716]: I1209 15:31:45.566823 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 09 15:31:46 crc kubenswrapper[4716]: I1209 15:31:46.065603 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 09 15:31:46 crc kubenswrapper[4716]: I1209 15:31:46.159181 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4076ed92-7690-4a97-b3d6-53a64842c96e","Type":"ContainerStarted","Data":"38d1d3500d4f5edea3daddbc7a5635fe3e350b3ec49e24e045db20b6686b8703"} Dec 09 15:31:46 crc kubenswrapper[4716]: I1209 15:31:46.378796 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-8mm7h" Dec 09 15:31:46 crc kubenswrapper[4716]: I1209 15:31:46.923125 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:46 crc kubenswrapper[4716]: I1209 15:31:46.950842 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/552d079e-332b-46db-946b-2777875f3dc7-etc-swift\") pod \"swift-storage-0\" (UID: \"552d079e-332b-46db-946b-2777875f3dc7\") " pod="openstack/swift-storage-0" Dec 09 15:31:47 crc kubenswrapper[4716]: I1209 15:31:47.029337 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 09 15:31:47 crc kubenswrapper[4716]: I1209 15:31:47.657360 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 09 15:31:48 crc kubenswrapper[4716]: I1209 15:31:48.198873 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"7e4f7b19790a53aeaa857ed4167f44d95f26b7d6135faad550f4d5da298f1c7c"} Dec 09 15:31:50 crc kubenswrapper[4716]: I1209 15:31:50.227388 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4076ed92-7690-4a97-b3d6-53a64842c96e","Type":"ContainerStarted","Data":"5deae5dada216f1c782018e799c696359c4173c311aae9f3d533bcb56cd90c6a"} Dec 09 15:31:55 crc kubenswrapper[4716]: I1209 15:31:55.315336 4716 generic.go:334] "Generic (PLEG): container finished" podID="4076ed92-7690-4a97-b3d6-53a64842c96e" containerID="5deae5dada216f1c782018e799c696359c4173c311aae9f3d533bcb56cd90c6a" exitCode=0 Dec 09 15:31:55 crc kubenswrapper[4716]: I1209 15:31:55.315379 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4076ed92-7690-4a97-b3d6-53a64842c96e","Type":"ContainerDied","Data":"5deae5dada216f1c782018e799c696359c4173c311aae9f3d533bcb56cd90c6a"} Dec 09 15:31:57 crc kubenswrapper[4716]: I1209 15:31:57.362148 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"f6c326c863cbe445c319192a96293ed754d2c6cde41d070dcc904b7f92e28deb"} Dec 09 15:31:57 crc kubenswrapper[4716]: I1209 15:31:57.362699 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"e920578d5e09478f90a65e18805f265238c2a94e3a6d68fd3ba9cb6823fdfbb4"} Dec 09 15:31:57 crc 
kubenswrapper[4716]: I1209 15:31:57.362714 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"6be4b534134313b501a2077311d6d26ed22a21d9636a6f70f36d9e9a54fa0147"} Dec 09 15:31:57 crc kubenswrapper[4716]: I1209 15:31:57.370580 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4076ed92-7690-4a97-b3d6-53a64842c96e","Type":"ContainerStarted","Data":"58a8986143e983728910181ee2fb3f0c149a1bcf639c479261f90bb525cfc45c"} Dec 09 15:31:57 crc kubenswrapper[4716]: I1209 15:31:57.372308 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zf7ll" event={"ID":"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f","Type":"ContainerStarted","Data":"f911366deb5be34ddbd72b25d8fc89f32ff62065ea6c93a230d269e1ad867d39"} Dec 09 15:31:58 crc kubenswrapper[4716]: I1209 15:31:58.401946 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"ad0c06dd3ff0f9f29fc42a68283b74575bfc219050dfaced702c8d6a58fc8b85"} Dec 09 15:31:59 crc kubenswrapper[4716]: I1209 15:31:59.424809 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"173f5104e6eaf7095f223e668de97abed500436e6de2ff7314c43ab9e3a86e83"} Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.440429 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"30f7c6c90461991964f69c6a36c48913a0087cc94d081a4d7ca9ad815f92bafe"} Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.440938 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"7ecc883d935bc3273f9b62a9597f463e0ef9b0fa17ce45d4e28764acd70b09b7"} Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.440958 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"de53bd4d4eca7302a63e34cdf90d6eeaa8e0d76f5d74eac69d43e489a28be4b8"} Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.443862 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4076ed92-7690-4a97-b3d6-53a64842c96e","Type":"ContainerStarted","Data":"727394c7dcb4b9669a902ca097aaa20ace0c5879f5cf589994101df8d880cd67"} Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.443910 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4076ed92-7690-4a97-b3d6-53a64842c96e","Type":"ContainerStarted","Data":"a8974b99ecb62e6e3b0ab8770956eab74cbb4b958c0a478d37ab652938509e2a"} Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.471418 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=15.471399649 podStartE2EDuration="15.471399649s" podCreationTimestamp="2025-12-09 15:31:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:00.469782573 +0000 UTC m=+1407.624526571" 
watchObservedRunningTime="2025-12-09 15:32:00.471399649 +0000 UTC m=+1407.626143637" Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.474861 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-zf7ll" podStartSLOduration=4.995281428 podStartE2EDuration="20.474840878s" podCreationTimestamp="2025-12-09 15:31:40 +0000 UTC" firstStartedPulling="2025-12-09 15:31:41.164920172 +0000 UTC m=+1388.319664160" lastFinishedPulling="2025-12-09 15:31:56.644479622 +0000 UTC m=+1403.799223610" observedRunningTime="2025-12-09 15:31:57.395136307 +0000 UTC m=+1404.549880295" watchObservedRunningTime="2025-12-09 15:32:00.474840878 +0000 UTC m=+1407.629584866" Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.569359 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.569410 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.575929 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 09 15:32:00 crc kubenswrapper[4716]: I1209 15:32:00.680876 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.039792 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-9v9w2"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.041737 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.049170 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-9v9w2"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.151390 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c934d3-830f-4a67-ae5f-cd703dbed98c-operator-scripts\") pod \"cinder-db-create-9v9w2\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.151538 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4m6b\" (UniqueName: \"kubernetes.io/projected/45c934d3-830f-4a67-ae5f-cd703dbed98c-kube-api-access-p4m6b\") pod \"cinder-db-create-9v9w2\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.248677 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-bde5-account-create-update-jtrwz"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.250181 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.253108 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c934d3-830f-4a67-ae5f-cd703dbed98c-operator-scripts\") pod \"cinder-db-create-9v9w2\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.253198 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4m6b\" (UniqueName: \"kubernetes.io/projected/45c934d3-830f-4a67-ae5f-cd703dbed98c-kube-api-access-p4m6b\") pod \"cinder-db-create-9v9w2\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.253887 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.254263 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c934d3-830f-4a67-ae5f-cd703dbed98c-operator-scripts\") pod \"cinder-db-create-9v9w2\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.280616 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-cp69g"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.282905 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.293701 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-bde5-account-create-update-jtrwz"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.295630 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4m6b\" (UniqueName: \"kubernetes.io/projected/45c934d3-830f-4a67-ae5f-cd703dbed98c-kube-api-access-p4m6b\") pod \"cinder-db-create-9v9w2\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.304488 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-cp69g"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.355540 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/456491de-2a30-4a09-8a30-de5a3c8ef790-operator-scripts\") pod \"heat-bde5-account-create-update-jtrwz\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.355615 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qqnj\" (UniqueName: \"kubernetes.io/projected/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-kube-api-access-7qqnj\") pod \"barbican-db-create-cp69g\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.356006 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-operator-scripts\") pod \"barbican-db-create-cp69g\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.356071 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5798\" (UniqueName: \"kubernetes.io/projected/456491de-2a30-4a09-8a30-de5a3c8ef790-kube-api-access-w5798\") pod \"heat-bde5-account-create-update-jtrwz\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.411683 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.461209 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/456491de-2a30-4a09-8a30-de5a3c8ef790-operator-scripts\") pod \"heat-bde5-account-create-update-jtrwz\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.461277 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qqnj\" (UniqueName: \"kubernetes.io/projected/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-kube-api-access-7qqnj\") pod \"barbican-db-create-cp69g\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.461406 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d961-account-create-update-jk7hm"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.461550 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-operator-scripts\") pod \"barbican-db-create-cp69g\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.461595 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5798\" (UniqueName: \"kubernetes.io/projected/456491de-2a30-4a09-8a30-de5a3c8ef790-kube-api-access-w5798\") pod \"heat-bde5-account-create-update-jtrwz\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.462373 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/456491de-2a30-4a09-8a30-de5a3c8ef790-operator-scripts\") pod \"heat-bde5-account-create-update-jtrwz\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.463039 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.463054 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-operator-scripts\") pod \"barbican-db-create-cp69g\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.465984 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.478660 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d961-account-create-update-jk7hm"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.522371 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.528680 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5798\" (UniqueName: \"kubernetes.io/projected/456491de-2a30-4a09-8a30-de5a3c8ef790-kube-api-access-w5798\") pod \"heat-bde5-account-create-update-jtrwz\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.552807 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qqnj\" (UniqueName: \"kubernetes.io/projected/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-kube-api-access-7qqnj\") pod \"barbican-db-create-cp69g\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.564256 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/812fdc3c-a04b-4074-9c33-2e4e2b82496d-operator-scripts\") pod \"barbican-d961-account-create-update-jk7hm\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.564484 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqhvx\" (UniqueName: \"kubernetes.io/projected/812fdc3c-a04b-4074-9c33-2e4e2b82496d-kube-api-access-pqhvx\") pod \"barbican-d961-account-create-update-jk7hm\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.597174 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.625271 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3ed5-account-create-update-c4v8k"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.627427 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.645349 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.666491 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqh9q\" (UniqueName: \"kubernetes.io/projected/eff29269-58ab-4659-82bb-6bef2fd7d5d1-kube-api-access-mqh9q\") pod \"cinder-3ed5-account-create-update-c4v8k\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.666545 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqhvx\" (UniqueName: \"kubernetes.io/projected/812fdc3c-a04b-4074-9c33-2e4e2b82496d-kube-api-access-pqhvx\") pod \"barbican-d961-account-create-update-jk7hm\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.666765 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eff29269-58ab-4659-82bb-6bef2fd7d5d1-operator-scripts\") pod \"cinder-3ed5-account-create-update-c4v8k\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.666963 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-xsvm8"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.666992 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/812fdc3c-a04b-4074-9c33-2e4e2b82496d-operator-scripts\") pod \"barbican-d961-account-create-update-jk7hm\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.667892 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/812fdc3c-a04b-4074-9c33-2e4e2b82496d-operator-scripts\") pod \"barbican-d961-account-create-update-jk7hm\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.668378 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.704236 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.731323 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqhvx\" (UniqueName: \"kubernetes.io/projected/812fdc3c-a04b-4074-9c33-2e4e2b82496d-kube-api-access-pqhvx\") pod \"barbican-d961-account-create-update-jk7hm\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.748742 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3ed5-account-create-update-c4v8k"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.768931 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eff29269-58ab-4659-82bb-6bef2fd7d5d1-operator-scripts\") pod \"cinder-3ed5-account-create-update-c4v8k\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.769011 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zxmx\" (UniqueName: \"kubernetes.io/projected/3533a208-b517-4082-9d3b-17baddccedfd-kube-api-access-8zxmx\") pod \"heat-db-create-xsvm8\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.769156 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3533a208-b517-4082-9d3b-17baddccedfd-operator-scripts\") pod \"heat-db-create-xsvm8\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.769209 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqh9q\" (UniqueName: \"kubernetes.io/projected/eff29269-58ab-4659-82bb-6bef2fd7d5d1-kube-api-access-mqh9q\") pod \"cinder-3ed5-account-create-update-c4v8k\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.770519 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eff29269-58ab-4659-82bb-6bef2fd7d5d1-operator-scripts\") pod \"cinder-3ed5-account-create-update-c4v8k\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.786863 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-xsvm8"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.831068 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqh9q\" (UniqueName: \"kubernetes.io/projected/eff29269-58ab-4659-82bb-6bef2fd7d5d1-kube-api-access-mqh9q\") pod \"cinder-3ed5-account-create-update-c4v8k\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.899355 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3533a208-b517-4082-9d3b-17baddccedfd-operator-scripts\") pod 
\"heat-db-create-xsvm8\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.899835 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zxmx\" (UniqueName: \"kubernetes.io/projected/3533a208-b517-4082-9d3b-17baddccedfd-kube-api-access-8zxmx\") pod \"heat-db-create-xsvm8\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.902384 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3533a208-b517-4082-9d3b-17baddccedfd-operator-scripts\") pod \"heat-db-create-xsvm8\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.941869 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-pzgnj"] Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.957838 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.964150 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kzrzm" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.964378 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.964514 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.985304 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 15:32:01 crc kubenswrapper[4716]: I1209 15:32:01.992155 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zxmx\" (UniqueName: \"kubernetes.io/projected/3533a208-b517-4082-9d3b-17baddccedfd-kube-api-access-8zxmx\") pod \"heat-db-create-xsvm8\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.015400 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.024873 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pzgnj"] Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.056826 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-62b3-account-create-update-nwf77"] Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.058424 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.062869 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.089814 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-62b3-account-create-update-nwf77"] Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.100399 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.105586 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44qkv\" (UniqueName: \"kubernetes.io/projected/c120b5a8-f662-4839-be5c-b9d94e80ab72-kube-api-access-44qkv\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.105669 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-config-data\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.105772 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-combined-ca-bundle\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.147265 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-mg54n"] Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.148370 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.154561 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.194218 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mg54n"] Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.209897 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-config-data\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.210048 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-combined-ca-bundle\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.210077 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9559f453-f355-4947-9078-0dc08b47d647-operator-scripts\") pod \"neutron-62b3-account-create-update-nwf77\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.210166 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqg2h\" (UniqueName: \"kubernetes.io/projected/9559f453-f355-4947-9078-0dc08b47d647-kube-api-access-bqg2h\") pod \"neutron-62b3-account-create-update-nwf77\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " 
pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.210227 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44qkv\" (UniqueName: \"kubernetes.io/projected/c120b5a8-f662-4839-be5c-b9d94e80ab72-kube-api-access-44qkv\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.222482 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-config-data\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.231422 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-combined-ca-bundle\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.245896 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44qkv\" (UniqueName: \"kubernetes.io/projected/c120b5a8-f662-4839-be5c-b9d94e80ab72-kube-api-access-44qkv\") pod \"keystone-db-sync-pzgnj\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.312938 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqg2h\" (UniqueName: \"kubernetes.io/projected/9559f453-f355-4947-9078-0dc08b47d647-kube-api-access-bqg2h\") pod \"neutron-62b3-account-create-update-nwf77\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.313485 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc2d5\" (UniqueName: \"kubernetes.io/projected/958ef966-3a33-491c-98fc-b13f19437e00-kube-api-access-xc2d5\") pod \"neutron-db-create-mg54n\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.313565 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958ef966-3a33-491c-98fc-b13f19437e00-operator-scripts\") pod \"neutron-db-create-mg54n\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.313667 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9559f453-f355-4947-9078-0dc08b47d647-operator-scripts\") pod \"neutron-62b3-account-create-update-nwf77\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.314485 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9559f453-f355-4947-9078-0dc08b47d647-operator-scripts\") pod \"neutron-62b3-account-create-update-nwf77\" (UID: 
\"9559f453-f355-4947-9078-0dc08b47d647\") " pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.333409 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.348766 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqg2h\" (UniqueName: \"kubernetes.io/projected/9559f453-f355-4947-9078-0dc08b47d647-kube-api-access-bqg2h\") pod \"neutron-62b3-account-create-update-nwf77\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.413298 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.415268 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc2d5\" (UniqueName: \"kubernetes.io/projected/958ef966-3a33-491c-98fc-b13f19437e00-kube-api-access-xc2d5\") pod \"neutron-db-create-mg54n\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.415370 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958ef966-3a33-491c-98fc-b13f19437e00-operator-scripts\") pod \"neutron-db-create-mg54n\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.416695 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958ef966-3a33-491c-98fc-b13f19437e00-operator-scripts\") pod \"neutron-db-create-mg54n\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.457021 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc2d5\" (UniqueName: \"kubernetes.io/projected/958ef966-3a33-491c-98fc-b13f19437e00-kube-api-access-xc2d5\") pod \"neutron-db-create-mg54n\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.493171 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:02 crc kubenswrapper[4716]: I1209 15:32:02.592904 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-9v9w2"] Dec 09 15:32:03 crc kubenswrapper[4716]: I1209 15:32:03.352474 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:32:03 crc kubenswrapper[4716]: I1209 15:32:03.532230 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9v9w2" event={"ID":"45c934d3-830f-4a67-ae5f-cd703dbed98c","Type":"ContainerStarted","Data":"f25ad235bcdc75e917781586c096cf42a973e8feb294b3d09d5d34b6434ae5b9"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.052440 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d961-account-create-update-jk7hm"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.098061 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-bde5-account-create-update-jtrwz"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.293174 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pzgnj"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.328946 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-cp69g"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.390700 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-xsvm8"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.606336 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9v9w2" event={"ID":"45c934d3-830f-4a67-ae5f-cd703dbed98c","Type":"ContainerStarted","Data":"130427c60649e5cd4be163e1625b38d357f73119d3cc0503d072756d878970b7"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.632356 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d961-account-create-update-jk7hm" event={"ID":"812fdc3c-a04b-4074-9c33-2e4e2b82496d","Type":"ContainerStarted","Data":"68529f57b8d389c7276459301b445664348755873e63f0f0c1279dbdc3417249"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.649421 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-9v9w2" podStartSLOduration=3.649398524 podStartE2EDuration="3.649398524s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:04.645264425 +0000 UTC m=+1411.800008413" watchObservedRunningTime="2025-12-09 15:32:04.649398524 +0000 UTC m=+1411.804142512" Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.655843 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-bde5-account-create-update-jtrwz" event={"ID":"456491de-2a30-4a09-8a30-de5a3c8ef790","Type":"ContainerStarted","Data":"3909d54231733f2b2917cf392b06ece68110c20d51b4a0c7b2626ccf121474e8"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.728533 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"48425df526634f7e2081c6f1cd5157b83afe2434b37c9b6b346577d192870ed2"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.728584 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"8b32497574dea8add8a989263d36d1cd123d25b4cf02349af9f75ef0b92627c4"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.732687 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cp69g" event={"ID":"75caa54f-bdef-4ae6-ba85-94c61ddc84f0","Type":"ContainerStarted","Data":"e081c60e9f623a770c8cea004e6561d3eecd31707185e08eb6317ab00797f6ca"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.753188 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xsvm8" event={"ID":"3533a208-b517-4082-9d3b-17baddccedfd","Type":"ContainerStarted","Data":"bb13bf668546dfed8e057baba45358100eec485b23fde7db8b462b482e294d47"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.800998 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3ed5-account-create-update-c4v8k"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.801056 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pzgnj" event={"ID":"c120b5a8-f662-4839-be5c-b9d94e80ab72","Type":"ContainerStarted","Data":"d3e3092793e659f7d795a677c15d1da2098169f159e397ede709c0d2af6e04be"} Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.837608 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mg54n"] Dec 09 15:32:04 crc kubenswrapper[4716]: I1209 15:32:04.951471 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-62b3-account-create-update-nwf77"] Dec 09 15:32:04 crc kubenswrapper[4716]: W1209 15:32:04.968109 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9559f453_f355_4947_9078_0dc08b47d647.slice/crio-3c5ce693c915d3887c77d733ad40d65103f56c05b3e94207b78beb880eee1ac3 WatchSource:0}: Error finding container 3c5ce693c915d3887c77d733ad40d65103f56c05b3e94207b78beb880eee1ac3: Status 404 returned error can't find the container with id 3c5ce693c915d3887c77d733ad40d65103f56c05b3e94207b78beb880eee1ac3 Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.818952 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-bde5-account-create-update-jtrwz" event={"ID":"456491de-2a30-4a09-8a30-de5a3c8ef790","Type":"ContainerStarted","Data":"c221a3151493f2b140614c7cfe1253a17de8501008871448aed0c97c464fcecf"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.833081 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"59c9c95da21cb9fe3c189b77e1a6426fbac8af1fa3cc23e636cca26300488e5d"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.833136 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"9c45e264f68db35db4c0f43b11ada5ef29584c903a978b37f5020d9dbfa066ca"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.835160 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-62b3-account-create-update-nwf77" event={"ID":"9559f453-f355-4947-9078-0dc08b47d647","Type":"ContainerStarted","Data":"a0a47b3b97966f336ea916ba548a6c11cba9f183ad4c6993f1f19ee1604dcfbe"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.835192 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-62b3-account-create-update-nwf77" event={"ID":"9559f453-f355-4947-9078-0dc08b47d647","Type":"ContainerStarted","Data":"3c5ce693c915d3887c77d733ad40d65103f56c05b3e94207b78beb880eee1ac3"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.851195 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-bde5-account-create-update-jtrwz" podStartSLOduration=4.851170241 podStartE2EDuration="4.851170241s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.839064652 +0000 UTC m=+1412.993808650" watchObservedRunningTime="2025-12-09 15:32:05.851170241 +0000 UTC m=+1413.005914229" Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.854393 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xsvm8" event={"ID":"3533a208-b517-4082-9d3b-17baddccedfd","Type":"ContainerStarted","Data":"931a1a63605551cbbfa8c8b8e6265e961811a71f88c3d30b8c212b47c64f46a7"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.862449 4716 generic.go:334] "Generic (PLEG): container finished" podID="45c934d3-830f-4a67-ae5f-cd703dbed98c" containerID="130427c60649e5cd4be163e1625b38d357f73119d3cc0503d072756d878970b7" exitCode=0 Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.862527 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9v9w2" event={"ID":"45c934d3-830f-4a67-ae5f-cd703dbed98c","Type":"ContainerDied","Data":"130427c60649e5cd4be163e1625b38d357f73119d3cc0503d072756d878970b7"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.873239 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-62b3-account-create-update-nwf77" podStartSLOduration=4.873213436 podStartE2EDuration="4.873213436s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.862440026 +0000 UTC m=+1413.017184014" watchObservedRunningTime="2025-12-09 15:32:05.873213436 +0000 UTC m=+1413.027957424" Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.880379 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d961-account-create-update-jk7hm" event={"ID":"812fdc3c-a04b-4074-9c33-2e4e2b82496d","Type":"ContainerStarted","Data":"709509d41f485759711038d93c8054610717fcbc075bc5943916085cf24cf18c"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.887827 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3ed5-account-create-update-c4v8k" event={"ID":"eff29269-58ab-4659-82bb-6bef2fd7d5d1","Type":"ContainerStarted","Data":"d3cc0b443877252c4d440de7341385ef4db2275850d795350c3794ff6b7e01cd"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.887884 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3ed5-account-create-update-c4v8k" event={"ID":"eff29269-58ab-4659-82bb-6bef2fd7d5d1","Type":"ContainerStarted","Data":"d788959ffb376fca89d3be0a62a3e96478fba1f3b0952e63f1158adb6d89d818"} Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.896525 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mg54n" event={"ID":"958ef966-3a33-491c-98fc-b13f19437e00","Type":"ContainerStarted","Data":"98e01daabc507ce7dd36c622b9d24cd953dd88a862f2bad6c8c3ada8324338a9"} Dec 09 15:32:05 crc 
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.896576 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mg54n" event={"ID":"958ef966-3a33-491c-98fc-b13f19437e00","Type":"ContainerStarted","Data":"2c73f5a5aea34aa75dd81edde918d958503f6c0e24d4ea47f7c7de3ee710f667"}
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.898019 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-xsvm8" podStartSLOduration=4.89798184 podStartE2EDuration="4.89798184s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.880381743 +0000 UTC m=+1413.035125731" watchObservedRunningTime="2025-12-09 15:32:05.89798184 +0000 UTC m=+1413.052725828"
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.904359 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cp69g" event={"ID":"75caa54f-bdef-4ae6-ba85-94c61ddc84f0","Type":"ContainerStarted","Data":"5f3d50cb4d4a5c8a8ddff54f0236d1655a75a6e86006b513bc2122241886f195"}
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.945631 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-d961-account-create-update-jk7hm" podStartSLOduration=4.945593172 podStartE2EDuration="4.945593172s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.917341978 +0000 UTC m=+1413.072085966" watchObservedRunningTime="2025-12-09 15:32:05.945593172 +0000 UTC m=+1413.100337160"
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.953482 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-3ed5-account-create-update-c4v8k" podStartSLOduration=4.953462839 podStartE2EDuration="4.953462839s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.935083589 +0000 UTC m=+1413.089827587" watchObservedRunningTime="2025-12-09 15:32:05.953462839 +0000 UTC m=+1413.108206827"
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.962014 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-cp69g" podStartSLOduration=4.961996185 podStartE2EDuration="4.961996185s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.957346921 +0000 UTC m=+1413.112090909" watchObservedRunningTime="2025-12-09 15:32:05.961996185 +0000 UTC m=+1413.116740173"
Dec 09 15:32:05 crc kubenswrapper[4716]: I1209 15:32:05.998084 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-mg54n" podStartSLOduration=4.998052384 podStartE2EDuration="4.998052384s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:05.98438362 +0000 UTC m=+1413.139127608" watchObservedRunningTime="2025-12-09 15:32:05.998052384 +0000 UTC m=+1413.152796382"
container finished" podID="eff29269-58ab-4659-82bb-6bef2fd7d5d1" containerID="d3cc0b443877252c4d440de7341385ef4db2275850d795350c3794ff6b7e01cd" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.920420 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3ed5-account-create-update-c4v8k" event={"ID":"eff29269-58ab-4659-82bb-6bef2fd7d5d1","Type":"ContainerDied","Data":"d3cc0b443877252c4d440de7341385ef4db2275850d795350c3794ff6b7e01cd"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.922775 4716 generic.go:334] "Generic (PLEG): container finished" podID="958ef966-3a33-491c-98fc-b13f19437e00" containerID="98e01daabc507ce7dd36c622b9d24cd953dd88a862f2bad6c8c3ada8324338a9" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.922844 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mg54n" event={"ID":"958ef966-3a33-491c-98fc-b13f19437e00","Type":"ContainerDied","Data":"98e01daabc507ce7dd36c622b9d24cd953dd88a862f2bad6c8c3ada8324338a9"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.931306 4716 generic.go:334] "Generic (PLEG): container finished" podID="456491de-2a30-4a09-8a30-de5a3c8ef790" containerID="c221a3151493f2b140614c7cfe1253a17de8501008871448aed0c97c464fcecf" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.931376 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-bde5-account-create-update-jtrwz" event={"ID":"456491de-2a30-4a09-8a30-de5a3c8ef790","Type":"ContainerDied","Data":"c221a3151493f2b140614c7cfe1253a17de8501008871448aed0c97c464fcecf"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.942339 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"363a2469ec99a5a43497efd642133bd7bc5fbe82b25e7ceaf5a4689f5502db6a"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.942388 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"6889f176ca7ef241f1736504b71a854390cb208c5a92881f677106204408d5fc"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.942399 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"552d079e-332b-46db-946b-2777875f3dc7","Type":"ContainerStarted","Data":"de7cde6e2b560c1e6a8031a608a70f9afc482b69e965d80a32b34b0086950327"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.945277 4716 generic.go:334] "Generic (PLEG): container finished" podID="9559f453-f355-4947-9078-0dc08b47d647" containerID="a0a47b3b97966f336ea916ba548a6c11cba9f183ad4c6993f1f19ee1604dcfbe" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.945367 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-62b3-account-create-update-nwf77" event={"ID":"9559f453-f355-4947-9078-0dc08b47d647","Type":"ContainerDied","Data":"a0a47b3b97966f336ea916ba548a6c11cba9f183ad4c6993f1f19ee1604dcfbe"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.947456 4716 generic.go:334] "Generic (PLEG): container finished" podID="75caa54f-bdef-4ae6-ba85-94c61ddc84f0" containerID="5f3d50cb4d4a5c8a8ddff54f0236d1655a75a6e86006b513bc2122241886f195" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.947500 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cp69g" 
event={"ID":"75caa54f-bdef-4ae6-ba85-94c61ddc84f0","Type":"ContainerDied","Data":"5f3d50cb4d4a5c8a8ddff54f0236d1655a75a6e86006b513bc2122241886f195"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.950139 4716 generic.go:334] "Generic (PLEG): container finished" podID="3533a208-b517-4082-9d3b-17baddccedfd" containerID="931a1a63605551cbbfa8c8b8e6265e961811a71f88c3d30b8c212b47c64f46a7" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.950207 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xsvm8" event={"ID":"3533a208-b517-4082-9d3b-17baddccedfd","Type":"ContainerDied","Data":"931a1a63605551cbbfa8c8b8e6265e961811a71f88c3d30b8c212b47c64f46a7"} Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.954771 4716 generic.go:334] "Generic (PLEG): container finished" podID="812fdc3c-a04b-4074-9c33-2e4e2b82496d" containerID="709509d41f485759711038d93c8054610717fcbc075bc5943916085cf24cf18c" exitCode=0 Dec 09 15:32:06 crc kubenswrapper[4716]: I1209 15:32:06.954887 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d961-account-create-update-jk7hm" event={"ID":"812fdc3c-a04b-4074-9c33-2e4e2b82496d","Type":"ContainerDied","Data":"709509d41f485759711038d93c8054610717fcbc075bc5943916085cf24cf18c"} Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.055400 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.535760503 podStartE2EDuration="54.055373608s" podCreationTimestamp="2025-12-09 15:31:13 +0000 UTC" firstStartedPulling="2025-12-09 15:31:47.669643366 +0000 UTC m=+1394.824387354" lastFinishedPulling="2025-12-09 15:32:03.189256471 +0000 UTC m=+1410.344000459" observedRunningTime="2025-12-09 15:32:07.052474324 +0000 UTC m=+1414.207218332" watchObservedRunningTime="2025-12-09 15:32:07.055373608 +0000 UTC m=+1414.210117596" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.441525 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-ww9d6"] Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.445932 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.449006 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.456503 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-ww9d6"] Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.471376 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.511727 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4m6b\" (UniqueName: \"kubernetes.io/projected/45c934d3-830f-4a67-ae5f-cd703dbed98c-kube-api-access-p4m6b\") pod \"45c934d3-830f-4a67-ae5f-cd703dbed98c\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.511966 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c934d3-830f-4a67-ae5f-cd703dbed98c-operator-scripts\") pod \"45c934d3-830f-4a67-ae5f-cd703dbed98c\" (UID: \"45c934d3-830f-4a67-ae5f-cd703dbed98c\") " Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.512264 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.512305 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-992zw\" (UniqueName: \"kubernetes.io/projected/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-kube-api-access-992zw\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.512321 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.512389 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-svc\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.512456 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.512489 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-config\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.513615 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45c934d3-830f-4a67-ae5f-cd703dbed98c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "45c934d3-830f-4a67-ae5f-cd703dbed98c" (UID: 
"45c934d3-830f-4a67-ae5f-cd703dbed98c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.560002 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45c934d3-830f-4a67-ae5f-cd703dbed98c-kube-api-access-p4m6b" (OuterVolumeSpecName: "kube-api-access-p4m6b") pod "45c934d3-830f-4a67-ae5f-cd703dbed98c" (UID: "45c934d3-830f-4a67-ae5f-cd703dbed98c"). InnerVolumeSpecName "kube-api-access-p4m6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615371 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-config\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615475 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615530 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-992zw\" (UniqueName: \"kubernetes.io/projected/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-kube-api-access-992zw\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615554 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615664 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-svc\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615780 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615931 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45c934d3-830f-4a67-ae5f-cd703dbed98c-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.615956 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4m6b\" (UniqueName: \"kubernetes.io/projected/45c934d3-830f-4a67-ae5f-cd703dbed98c-kube-api-access-p4m6b\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.616985 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.617964 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-config\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.618444 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.618564 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.619419 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-svc\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.672814 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-992zw\" (UniqueName: \"kubernetes.io/projected/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-kube-api-access-992zw\") pod \"dnsmasq-dns-764c5664d7-ww9d6\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.807568 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.981669 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9v9w2" Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.982767 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9v9w2" event={"ID":"45c934d3-830f-4a67-ae5f-cd703dbed98c","Type":"ContainerDied","Data":"f25ad235bcdc75e917781586c096cf42a973e8feb294b3d09d5d34b6434ae5b9"} Dec 09 15:32:07 crc kubenswrapper[4716]: I1209 15:32:07.982816 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f25ad235bcdc75e917781586c096cf42a973e8feb294b3d09d5d34b6434ae5b9" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.351225 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-ww9d6"] Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.589361 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.757565 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9559f453-f355-4947-9078-0dc08b47d647-operator-scripts\") pod \"9559f453-f355-4947-9078-0dc08b47d647\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.758112 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqg2h\" (UniqueName: \"kubernetes.io/projected/9559f453-f355-4947-9078-0dc08b47d647-kube-api-access-bqg2h\") pod \"9559f453-f355-4947-9078-0dc08b47d647\" (UID: \"9559f453-f355-4947-9078-0dc08b47d647\") " Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.760183 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9559f453-f355-4947-9078-0dc08b47d647-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9559f453-f355-4947-9078-0dc08b47d647" (UID: "9559f453-f355-4947-9078-0dc08b47d647"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.763819 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9559f453-f355-4947-9078-0dc08b47d647-kube-api-access-bqg2h" (OuterVolumeSpecName: "kube-api-access-bqg2h") pod "9559f453-f355-4947-9078-0dc08b47d647" (UID: "9559f453-f355-4947-9078-0dc08b47d647"). InnerVolumeSpecName "kube-api-access-bqg2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.861014 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9559f453-f355-4947-9078-0dc08b47d647-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.861056 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqg2h\" (UniqueName: \"kubernetes.io/projected/9559f453-f355-4947-9078-0dc08b47d647-kube-api-access-bqg2h\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.994123 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-62b3-account-create-update-nwf77" event={"ID":"9559f453-f355-4947-9078-0dc08b47d647","Type":"ContainerDied","Data":"3c5ce693c915d3887c77d733ad40d65103f56c05b3e94207b78beb880eee1ac3"} Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.994162 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-62b3-account-create-update-nwf77" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.994164 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c5ce693c915d3887c77d733ad40d65103f56c05b3e94207b78beb880eee1ac3" Dec 09 15:32:08 crc kubenswrapper[4716]: I1209 15:32:08.995538 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" event={"ID":"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280","Type":"ContainerStarted","Data":"e1cfe3ff801ede2224dee2cf717d531029461f375521c54df925f90c17ca91fe"} Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.732335 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.742401 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.751474 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.803046 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.806584 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.825984 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831458 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc2d5\" (UniqueName: \"kubernetes.io/projected/958ef966-3a33-491c-98fc-b13f19437e00-kube-api-access-xc2d5\") pod \"958ef966-3a33-491c-98fc-b13f19437e00\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831544 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zxmx\" (UniqueName: \"kubernetes.io/projected/3533a208-b517-4082-9d3b-17baddccedfd-kube-api-access-8zxmx\") pod \"3533a208-b517-4082-9d3b-17baddccedfd\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831585 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958ef966-3a33-491c-98fc-b13f19437e00-operator-scripts\") pod \"958ef966-3a33-491c-98fc-b13f19437e00\" (UID: \"958ef966-3a33-491c-98fc-b13f19437e00\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831730 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5798\" (UniqueName: \"kubernetes.io/projected/456491de-2a30-4a09-8a30-de5a3c8ef790-kube-api-access-w5798\") pod \"456491de-2a30-4a09-8a30-de5a3c8ef790\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831753 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eff29269-58ab-4659-82bb-6bef2fd7d5d1-operator-scripts\") pod \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831784 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/456491de-2a30-4a09-8a30-de5a3c8ef790-operator-scripts\") pod \"456491de-2a30-4a09-8a30-de5a3c8ef790\" (UID: \"456491de-2a30-4a09-8a30-de5a3c8ef790\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831831 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-operator-scripts\") pod \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\" (UID: 
\"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831857 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qqnj\" (UniqueName: \"kubernetes.io/projected/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-kube-api-access-7qqnj\") pod \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\" (UID: \"75caa54f-bdef-4ae6-ba85-94c61ddc84f0\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831887 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqh9q\" (UniqueName: \"kubernetes.io/projected/eff29269-58ab-4659-82bb-6bef2fd7d5d1-kube-api-access-mqh9q\") pod \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\" (UID: \"eff29269-58ab-4659-82bb-6bef2fd7d5d1\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831911 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3533a208-b517-4082-9d3b-17baddccedfd-operator-scripts\") pod \"3533a208-b517-4082-9d3b-17baddccedfd\" (UID: \"3533a208-b517-4082-9d3b-17baddccedfd\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831931 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/812fdc3c-a04b-4074-9c33-2e4e2b82496d-operator-scripts\") pod \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.831947 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqhvx\" (UniqueName: \"kubernetes.io/projected/812fdc3c-a04b-4074-9c33-2e4e2b82496d-kube-api-access-pqhvx\") pod \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\" (UID: \"812fdc3c-a04b-4074-9c33-2e4e2b82496d\") " Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.833828 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eff29269-58ab-4659-82bb-6bef2fd7d5d1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eff29269-58ab-4659-82bb-6bef2fd7d5d1" (UID: "eff29269-58ab-4659-82bb-6bef2fd7d5d1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.834054 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/456491de-2a30-4a09-8a30-de5a3c8ef790-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "456491de-2a30-4a09-8a30-de5a3c8ef790" (UID: "456491de-2a30-4a09-8a30-de5a3c8ef790"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.834533 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/958ef966-3a33-491c-98fc-b13f19437e00-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "958ef966-3a33-491c-98fc-b13f19437e00" (UID: "958ef966-3a33-491c-98fc-b13f19437e00"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.836746 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/812fdc3c-a04b-4074-9c33-2e4e2b82496d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "812fdc3c-a04b-4074-9c33-2e4e2b82496d" (UID: "812fdc3c-a04b-4074-9c33-2e4e2b82496d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.836747 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3533a208-b517-4082-9d3b-17baddccedfd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3533a208-b517-4082-9d3b-17baddccedfd" (UID: "3533a208-b517-4082-9d3b-17baddccedfd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.836868 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3533a208-b517-4082-9d3b-17baddccedfd-kube-api-access-8zxmx" (OuterVolumeSpecName: "kube-api-access-8zxmx") pod "3533a208-b517-4082-9d3b-17baddccedfd" (UID: "3533a208-b517-4082-9d3b-17baddccedfd"). InnerVolumeSpecName "kube-api-access-8zxmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.838928 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "75caa54f-bdef-4ae6-ba85-94c61ddc84f0" (UID: "75caa54f-bdef-4ae6-ba85-94c61ddc84f0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.839477 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/958ef966-3a33-491c-98fc-b13f19437e00-kube-api-access-xc2d5" (OuterVolumeSpecName: "kube-api-access-xc2d5") pod "958ef966-3a33-491c-98fc-b13f19437e00" (UID: "958ef966-3a33-491c-98fc-b13f19437e00"). InnerVolumeSpecName "kube-api-access-xc2d5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.839512 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/812fdc3c-a04b-4074-9c33-2e4e2b82496d-kube-api-access-pqhvx" (OuterVolumeSpecName: "kube-api-access-pqhvx") pod "812fdc3c-a04b-4074-9c33-2e4e2b82496d" (UID: "812fdc3c-a04b-4074-9c33-2e4e2b82496d"). InnerVolumeSpecName "kube-api-access-pqhvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.847230 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff29269-58ab-4659-82bb-6bef2fd7d5d1-kube-api-access-mqh9q" (OuterVolumeSpecName: "kube-api-access-mqh9q") pod "eff29269-58ab-4659-82bb-6bef2fd7d5d1" (UID: "eff29269-58ab-4659-82bb-6bef2fd7d5d1"). InnerVolumeSpecName "kube-api-access-mqh9q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.858267 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-kube-api-access-7qqnj" (OuterVolumeSpecName: "kube-api-access-7qqnj") pod "75caa54f-bdef-4ae6-ba85-94c61ddc84f0" (UID: "75caa54f-bdef-4ae6-ba85-94c61ddc84f0"). InnerVolumeSpecName "kube-api-access-7qqnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.872999 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/456491de-2a30-4a09-8a30-de5a3c8ef790-kube-api-access-w5798" (OuterVolumeSpecName: "kube-api-access-w5798") pod "456491de-2a30-4a09-8a30-de5a3c8ef790" (UID: "456491de-2a30-4a09-8a30-de5a3c8ef790"). InnerVolumeSpecName "kube-api-access-w5798". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.933954 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc2d5\" (UniqueName: \"kubernetes.io/projected/958ef966-3a33-491c-98fc-b13f19437e00-kube-api-access-xc2d5\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934239 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zxmx\" (UniqueName: \"kubernetes.io/projected/3533a208-b517-4082-9d3b-17baddccedfd-kube-api-access-8zxmx\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934337 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/958ef966-3a33-491c-98fc-b13f19437e00-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934424 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5798\" (UniqueName: \"kubernetes.io/projected/456491de-2a30-4a09-8a30-de5a3c8ef790-kube-api-access-w5798\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934519 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eff29269-58ab-4659-82bb-6bef2fd7d5d1-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934612 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/456491de-2a30-4a09-8a30-de5a3c8ef790-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934726 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934820 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qqnj\" (UniqueName: \"kubernetes.io/projected/75caa54f-bdef-4ae6-ba85-94c61ddc84f0-kube-api-access-7qqnj\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.934910 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqh9q\" (UniqueName: \"kubernetes.io/projected/eff29269-58ab-4659-82bb-6bef2fd7d5d1-kube-api-access-mqh9q\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.935006 4716 
reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3533a208-b517-4082-9d3b-17baddccedfd-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.935102 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/812fdc3c-a04b-4074-9c33-2e4e2b82496d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:11 crc kubenswrapper[4716]: I1209 15:32:11.935192 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqhvx\" (UniqueName: \"kubernetes.io/projected/812fdc3c-a04b-4074-9c33-2e4e2b82496d-kube-api-access-pqhvx\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.039635 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-cp69g" event={"ID":"75caa54f-bdef-4ae6-ba85-94c61ddc84f0","Type":"ContainerDied","Data":"e081c60e9f623a770c8cea004e6561d3eecd31707185e08eb6317ab00797f6ca"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.039699 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e081c60e9f623a770c8cea004e6561d3eecd31707185e08eb6317ab00797f6ca" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.039974 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-cp69g" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.042090 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-xsvm8" event={"ID":"3533a208-b517-4082-9d3b-17baddccedfd","Type":"ContainerDied","Data":"bb13bf668546dfed8e057baba45358100eec485b23fde7db8b462b482e294d47"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.042129 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb13bf668546dfed8e057baba45358100eec485b23fde7db8b462b482e294d47" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.042151 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-xsvm8" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.054267 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pzgnj" event={"ID":"c120b5a8-f662-4839-be5c-b9d94e80ab72","Type":"ContainerStarted","Data":"5c87a39b51845cec97b5e11f1d6aefaf48ad0f3913e6761edce07cab1f310632"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.057356 4716 generic.go:334] "Generic (PLEG): container finished" podID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerID="08d67b00730260387d582378594983b19712d6f28701b9a576ec7a8636e92ba0" exitCode=0 Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.057908 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" event={"ID":"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280","Type":"ContainerDied","Data":"08d67b00730260387d582378594983b19712d6f28701b9a576ec7a8636e92ba0"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.062789 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d961-account-create-update-jk7hm" event={"ID":"812fdc3c-a04b-4074-9c33-2e4e2b82496d","Type":"ContainerDied","Data":"68529f57b8d389c7276459301b445664348755873e63f0f0c1279dbdc3417249"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.062825 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68529f57b8d389c7276459301b445664348755873e63f0f0c1279dbdc3417249" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.062902 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d961-account-create-update-jk7hm" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.066823 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3ed5-account-create-update-c4v8k" event={"ID":"eff29269-58ab-4659-82bb-6bef2fd7d5d1","Type":"ContainerDied","Data":"d788959ffb376fca89d3be0a62a3e96478fba1f3b0952e63f1158adb6d89d818"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.066878 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d788959ffb376fca89d3be0a62a3e96478fba1f3b0952e63f1158adb6d89d818" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.066851 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3ed5-account-create-update-c4v8k" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.071135 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mg54n" event={"ID":"958ef966-3a33-491c-98fc-b13f19437e00","Type":"ContainerDied","Data":"2c73f5a5aea34aa75dd81edde918d958503f6c0e24d4ea47f7c7de3ee710f667"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.071185 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c73f5a5aea34aa75dd81edde918d958503f6c0e24d4ea47f7c7de3ee710f667" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.071272 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mg54n" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.080166 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-pzgnj" podStartSLOduration=3.953002655 podStartE2EDuration="11.080144888s" podCreationTimestamp="2025-12-09 15:32:01 +0000 UTC" firstStartedPulling="2025-12-09 15:32:04.403740704 +0000 UTC m=+1411.558484692" lastFinishedPulling="2025-12-09 15:32:11.530882947 +0000 UTC m=+1418.685626925" observedRunningTime="2025-12-09 15:32:12.072906939 +0000 UTC m=+1419.227650937" watchObservedRunningTime="2025-12-09 15:32:12.080144888 +0000 UTC m=+1419.234888876" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.094351 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-bde5-account-create-update-jtrwz" event={"ID":"456491de-2a30-4a09-8a30-de5a3c8ef790","Type":"ContainerDied","Data":"3909d54231733f2b2917cf392b06ece68110c20d51b4a0c7b2626ccf121474e8"} Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.094397 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3909d54231733f2b2917cf392b06ece68110c20d51b4a0c7b2626ccf121474e8" Dec 09 15:32:12 crc kubenswrapper[4716]: I1209 15:32:12.094466 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-bde5-account-create-update-jtrwz" Dec 09 15:32:13 crc kubenswrapper[4716]: I1209 15:32:13.107021 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" event={"ID":"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280","Type":"ContainerStarted","Data":"5de731bef50185e363a10963abb63127e1250f1e103ed2003414e8104cf52bf7"} Dec 09 15:32:13 crc kubenswrapper[4716]: I1209 15:32:13.107537 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:13 crc kubenswrapper[4716]: I1209 15:32:13.109663 4716 generic.go:334] "Generic (PLEG): container finished" podID="cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" containerID="f911366deb5be34ddbd72b25d8fc89f32ff62065ea6c93a230d269e1ad867d39" exitCode=0 Dec 09 15:32:13 crc kubenswrapper[4716]: I1209 15:32:13.109742 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zf7ll" event={"ID":"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f","Type":"ContainerDied","Data":"f911366deb5be34ddbd72b25d8fc89f32ff62065ea6c93a230d269e1ad867d39"} Dec 09 15:32:13 crc kubenswrapper[4716]: I1209 15:32:13.128919 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" podStartSLOduration=6.128892594 podStartE2EDuration="6.128892594s" podCreationTimestamp="2025-12-09 15:32:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:13.126995319 +0000 UTC m=+1420.281739317" watchObservedRunningTime="2025-12-09 15:32:13.128892594 +0000 UTC m=+1420.283636582" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.533746 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-zf7ll" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.698460 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-combined-ca-bundle\") pod \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.698726 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jnfb\" (UniqueName: \"kubernetes.io/projected/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-kube-api-access-6jnfb\") pod \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.698760 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-db-sync-config-data\") pod \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.698793 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-config-data\") pod \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\" (UID: \"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f\") " Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.704076 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-kube-api-access-6jnfb" (OuterVolumeSpecName: "kube-api-access-6jnfb") pod "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" (UID: "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f"). InnerVolumeSpecName "kube-api-access-6jnfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.705868 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" (UID: "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.728902 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" (UID: "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.753245 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-config-data" (OuterVolumeSpecName: "config-data") pod "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" (UID: "cf4177cf-f9cb-4e7f-94d8-bec58db07c2f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.801341 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jnfb\" (UniqueName: \"kubernetes.io/projected/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-kube-api-access-6jnfb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.801387 4716 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.801397 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:14 crc kubenswrapper[4716]: I1209 15:32:14.801406 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.131333 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zf7ll" event={"ID":"cf4177cf-f9cb-4e7f-94d8-bec58db07c2f","Type":"ContainerDied","Data":"b2f74ff7785b2e59ce11f5d6f3997301f6e0b1e06490de875caf22b8308f7ff3"} Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.131681 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2f74ff7785b2e59ce11f5d6f3997301f6e0b1e06490de875caf22b8308f7ff3" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.131392 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-zf7ll" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.563413 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-ww9d6"] Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.563668 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerName="dnsmasq-dns" containerID="cri-o://5de731bef50185e363a10963abb63127e1250f1e103ed2003414e8104cf52bf7" gracePeriod=10 Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.619904 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-54tdq"] Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620406 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff29269-58ab-4659-82bb-6bef2fd7d5d1" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620431 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff29269-58ab-4659-82bb-6bef2fd7d5d1" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620456 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" containerName="glance-db-sync" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620463 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" containerName="glance-db-sync" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620479 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45c934d3-830f-4a67-ae5f-cd703dbed98c" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620485 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="45c934d3-830f-4a67-ae5f-cd703dbed98c" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620498 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="456491de-2a30-4a09-8a30-de5a3c8ef790" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620504 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="456491de-2a30-4a09-8a30-de5a3c8ef790" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620523 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812fdc3c-a04b-4074-9c33-2e4e2b82496d" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620529 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="812fdc3c-a04b-4074-9c33-2e4e2b82496d" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620538 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="958ef966-3a33-491c-98fc-b13f19437e00" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620544 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="958ef966-3a33-491c-98fc-b13f19437e00" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620555 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3533a208-b517-4082-9d3b-17baddccedfd" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620562 4716 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="3533a208-b517-4082-9d3b-17baddccedfd" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620580 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9559f453-f355-4947-9078-0dc08b47d647" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620588 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9559f453-f355-4947-9078-0dc08b47d647" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: E1209 15:32:15.620610 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75caa54f-bdef-4ae6-ba85-94c61ddc84f0" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620617 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="75caa54f-bdef-4ae6-ba85-94c61ddc84f0" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620875 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" containerName="glance-db-sync" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620896 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="958ef966-3a33-491c-98fc-b13f19437e00" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620921 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="812fdc3c-a04b-4074-9c33-2e4e2b82496d" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620937 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3533a208-b517-4082-9d3b-17baddccedfd" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620951 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="75caa54f-bdef-4ae6-ba85-94c61ddc84f0" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620960 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="45c934d3-830f-4a67-ae5f-cd703dbed98c" containerName="mariadb-database-create" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620966 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9559f453-f355-4947-9078-0dc08b47d647" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620977 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="eff29269-58ab-4659-82bb-6bef2fd7d5d1" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.620986 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="456491de-2a30-4a09-8a30-de5a3c8ef790" containerName="mariadb-account-create-update" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.622337 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.654290 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-54tdq"] Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.720144 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.720234 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-config\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.720305 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.720401 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.720430 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq267\" (UniqueName: \"kubernetes.io/projected/e97c9733-3a49-4920-a32b-7bb6b35786f7-kube-api-access-tq267\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.720472 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.822353 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.822491 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.822573 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-config\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.822762 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.822926 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.822958 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq267\" (UniqueName: \"kubernetes.io/projected/e97c9733-3a49-4920-a32b-7bb6b35786f7-kube-api-access-tq267\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.823522 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.823602 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.823866 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.824117 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.824661 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-config\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.844234 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq267\" (UniqueName: 
\"kubernetes.io/projected/e97c9733-3a49-4920-a32b-7bb6b35786f7-kube-api-access-tq267\") pod \"dnsmasq-dns-74f6bcbc87-54tdq\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:15 crc kubenswrapper[4716]: I1209 15:32:15.952507 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.155609 4716 generic.go:334] "Generic (PLEG): container finished" podID="c120b5a8-f662-4839-be5c-b9d94e80ab72" containerID="5c87a39b51845cec97b5e11f1d6aefaf48ad0f3913e6761edce07cab1f310632" exitCode=0 Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.155882 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pzgnj" event={"ID":"c120b5a8-f662-4839-be5c-b9d94e80ab72","Type":"ContainerDied","Data":"5c87a39b51845cec97b5e11f1d6aefaf48ad0f3913e6761edce07cab1f310632"} Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.177698 4716 generic.go:334] "Generic (PLEG): container finished" podID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerID="5de731bef50185e363a10963abb63127e1250f1e103ed2003414e8104cf52bf7" exitCode=0 Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.177759 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" event={"ID":"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280","Type":"ContainerDied","Data":"5de731bef50185e363a10963abb63127e1250f1e103ed2003414e8104cf52bf7"} Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.415424 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.573343 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-sb\") pod \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.573404 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-svc\") pod \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.573467 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-992zw\" (UniqueName: \"kubernetes.io/projected/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-kube-api-access-992zw\") pod \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.573569 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-swift-storage-0\") pod \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.573625 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-config\") pod \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.574045 
4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-nb\") pod \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\" (UID: \"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280\") " Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.592998 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-kube-api-access-992zw" (OuterVolumeSpecName: "kube-api-access-992zw") pod "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" (UID: "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"). InnerVolumeSpecName "kube-api-access-992zw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.633171 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-54tdq"] Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.689493 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-992zw\" (UniqueName: \"kubernetes.io/projected/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-kube-api-access-992zw\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.741452 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-config" (OuterVolumeSpecName: "config") pod "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" (UID: "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.758893 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" (UID: "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.764812 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" (UID: "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.773281 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" (UID: "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.782058 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" (UID: "2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.791265 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.791316 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.791332 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.791342 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:16 crc kubenswrapper[4716]: I1209 15:32:16.791353 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.192902 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" event={"ID":"2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280","Type":"ContainerDied","Data":"e1cfe3ff801ede2224dee2cf717d531029461f375521c54df925f90c17ca91fe"} Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.192955 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.192980 4716 scope.go:117] "RemoveContainer" containerID="5de731bef50185e363a10963abb63127e1250f1e103ed2003414e8104cf52bf7" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.196070 4716 generic.go:334] "Generic (PLEG): container finished" podID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerID="7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6" exitCode=0 Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.196998 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" event={"ID":"e97c9733-3a49-4920-a32b-7bb6b35786f7","Type":"ContainerDied","Data":"7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6"} Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.197035 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" event={"ID":"e97c9733-3a49-4920-a32b-7bb6b35786f7","Type":"ContainerStarted","Data":"992f744db4e3ea391e9aa0f7b168343561bee425cd7d8492f3606e92dccb0c90"} Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.231113 4716 scope.go:117] "RemoveContainer" containerID="08d67b00730260387d582378594983b19712d6f28701b9a576ec7a8636e92ba0" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.564276 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.622432 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44qkv\" (UniqueName: \"kubernetes.io/projected/c120b5a8-f662-4839-be5c-b9d94e80ab72-kube-api-access-44qkv\") pod \"c120b5a8-f662-4839-be5c-b9d94e80ab72\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.622549 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-config-data\") pod \"c120b5a8-f662-4839-be5c-b9d94e80ab72\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.622594 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-combined-ca-bundle\") pod \"c120b5a8-f662-4839-be5c-b9d94e80ab72\" (UID: \"c120b5a8-f662-4839-be5c-b9d94e80ab72\") " Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.627112 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c120b5a8-f662-4839-be5c-b9d94e80ab72-kube-api-access-44qkv" (OuterVolumeSpecName: "kube-api-access-44qkv") pod "c120b5a8-f662-4839-be5c-b9d94e80ab72" (UID: "c120b5a8-f662-4839-be5c-b9d94e80ab72"). InnerVolumeSpecName "kube-api-access-44qkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.654503 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c120b5a8-f662-4839-be5c-b9d94e80ab72" (UID: "c120b5a8-f662-4839-be5c-b9d94e80ab72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.679306 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-config-data" (OuterVolumeSpecName: "config-data") pod "c120b5a8-f662-4839-be5c-b9d94e80ab72" (UID: "c120b5a8-f662-4839-be5c-b9d94e80ab72"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.725258 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44qkv\" (UniqueName: \"kubernetes.io/projected/c120b5a8-f662-4839-be5c-b9d94e80ab72-kube-api-access-44qkv\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.725305 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.725320 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c120b5a8-f662-4839-be5c-b9d94e80ab72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.921926 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:32:17 crc kubenswrapper[4716]: I1209 15:32:17.922555 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.214718 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" event={"ID":"e97c9733-3a49-4920-a32b-7bb6b35786f7","Type":"ContainerStarted","Data":"c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7"} Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.215536 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.217471 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pzgnj" event={"ID":"c120b5a8-f662-4839-be5c-b9d94e80ab72","Type":"ContainerDied","Data":"d3e3092793e659f7d795a677c15d1da2098169f159e397ede709c0d2af6e04be"} Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.217500 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3e3092793e659f7d795a677c15d1da2098169f159e397ede709c0d2af6e04be" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.217561 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-pzgnj" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.249745 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" podStartSLOduration=3.249721842 podStartE2EDuration="3.249721842s" podCreationTimestamp="2025-12-09 15:32:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:18.240298501 +0000 UTC m=+1425.395042489" watchObservedRunningTime="2025-12-09 15:32:18.249721842 +0000 UTC m=+1425.404465830" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.434137 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-54tdq"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.445429 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-g4z57"] Dec 09 15:32:18 crc kubenswrapper[4716]: E1209 15:32:18.450374 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c120b5a8-f662-4839-be5c-b9d94e80ab72" containerName="keystone-db-sync" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.450415 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c120b5a8-f662-4839-be5c-b9d94e80ab72" containerName="keystone-db-sync" Dec 09 15:32:18 crc kubenswrapper[4716]: E1209 15:32:18.450454 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerName="init" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.450461 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerName="init" Dec 09 15:32:18 crc kubenswrapper[4716]: E1209 15:32:18.450474 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerName="dnsmasq-dns" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.450484 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerName="dnsmasq-dns" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.450747 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" containerName="dnsmasq-dns" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.450798 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c120b5a8-f662-4839-be5c-b9d94e80ab72" containerName="keystone-db-sync" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.451715 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.459010 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.463541 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.463868 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.465327 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.465555 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kzrzm" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.484644 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g4z57"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.495258 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-lhj76"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.497405 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.521006 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-lhj76"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544461 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-config-data\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544529 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544555 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-fernet-keys\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544578 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n95rc\" (UniqueName: \"kubernetes.io/projected/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-kube-api-access-n95rc\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544598 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-scripts\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" 
Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544682 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544720 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544736 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-credential-keys\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544770 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vjj8\" (UniqueName: \"kubernetes.io/projected/06ef0151-4164-436b-b817-5522ac1b07dd-kube-api-access-2vjj8\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544816 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-config\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544863 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-svc\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.544886 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-combined-ca-bundle\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.648970 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649058 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " 
pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649084 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-credential-keys\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649115 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vjj8\" (UniqueName: \"kubernetes.io/projected/06ef0151-4164-436b-b817-5522ac1b07dd-kube-api-access-2vjj8\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649161 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-config\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649206 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-svc\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649232 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-combined-ca-bundle\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649267 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-config-data\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649312 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649333 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-fernet-keys\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.649390 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n95rc\" (UniqueName: \"kubernetes.io/projected/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-kube-api-access-n95rc\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 
15:32:18.649419 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-scripts\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.651556 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-svc\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.652186 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.652574 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-config\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.653133 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.664424 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.670851 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-xr25h"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.672756 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-scripts\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.680375 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-credential-keys\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.685805 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-config-data\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.692593 4716 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/heat-db-sync-xr25h"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.692706 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.693486 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vjj8\" (UniqueName: \"kubernetes.io/projected/06ef0151-4164-436b-b817-5522ac1b07dd-kube-api-access-2vjj8\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.694099 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-combined-ca-bundle\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.694282 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-fernet-keys\") pod \"keystone-bootstrap-g4z57\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.699221 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-x646c" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.699485 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.714544 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n95rc\" (UniqueName: \"kubernetes.io/projected/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-kube-api-access-n95rc\") pod \"dnsmasq-dns-847c4cc679-lhj76\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.774345 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.825225 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.838177 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-2rwfp"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.839828 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.843065 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.843808 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-6rrkk" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.844406 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.884108 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-config-data\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.884234 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vqb4\" (UniqueName: \"kubernetes.io/projected/3aba2daa-0fe3-419f-a361-1be829c1e3d0-kube-api-access-2vqb4\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.884354 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-combined-ca-bundle\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.884422 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-combined-ca-bundle\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.884456 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qchn\" (UniqueName: \"kubernetes.io/projected/e1fa7713-f67e-45a2-81c0-73a56280f744-kube-api-access-2qchn\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.884510 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.894666 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-2rwfp"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.929722 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-sqhrf"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.931340 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.955153 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-lxgp5" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.955414 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.955447 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.981178 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-d4ccd"] Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.987227 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-config-data\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.987307 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vqb4\" (UniqueName: \"kubernetes.io/projected/3aba2daa-0fe3-419f-a361-1be829c1e3d0-kube-api-access-2vqb4\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.987357 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-combined-ca-bundle\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.987389 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-combined-ca-bundle\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.987408 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qchn\" (UniqueName: \"kubernetes.io/projected/e1fa7713-f67e-45a2-81c0-73a56280f744-kube-api-access-2qchn\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.987439 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.989714 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:18 crc kubenswrapper[4716]: I1209 15:32:18.998203 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.001661 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-config-data\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.017547 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-combined-ca-bundle\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.018491 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-combined-ca-bundle\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.054368 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-lhj76"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.059157 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.059477 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-flrvf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.059629 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.067363 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vqb4\" (UniqueName: \"kubernetes.io/projected/3aba2daa-0fe3-419f-a361-1be829c1e3d0-kube-api-access-2vqb4\") pod \"neutron-db-sync-2rwfp\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.121570 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cmjf\" (UniqueName: \"kubernetes.io/projected/2c59c8f4-e888-4345-8cdd-5581ef0f801c-kube-api-access-6cmjf\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.121669 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c59c8f4-e888-4345-8cdd-5581ef0f801c-logs\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.121702 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-combined-ca-bundle\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.124475 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.132474 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qchn\" (UniqueName: \"kubernetes.io/projected/e1fa7713-f67e-45a2-81c0-73a56280f744-kube-api-access-2qchn\") pod \"heat-db-sync-xr25h\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.153032 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-config-data\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.153946 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-config-data\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.154251 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-db-sync-config-data\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.166858 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-scripts\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.166979 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-scripts\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.167103 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c31e7c21-64fd-4bb2-b165-df1743489363-etc-machine-id\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.167201 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5644v\" (UniqueName: \"kubernetes.io/projected/c31e7c21-64fd-4bb2-b165-df1743489363-kube-api-access-5644v\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.167379 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-combined-ca-bundle\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.268288 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-sqhrf"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.268923 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-d4ccd"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.278528 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cmjf\" (UniqueName: \"kubernetes.io/projected/2c59c8f4-e888-4345-8cdd-5581ef0f801c-kube-api-access-6cmjf\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.278601 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c59c8f4-e888-4345-8cdd-5581ef0f801c-logs\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.278652 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-combined-ca-bundle\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.278788 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-config-data\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279033 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-config-data\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279063 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-db-sync-config-data\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279205 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-scripts\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279251 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-scripts\") pod \"placement-db-sync-d4ccd\" (UID: 
\"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279298 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c31e7c21-64fd-4bb2-b165-df1743489363-etc-machine-id\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279345 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5644v\" (UniqueName: \"kubernetes.io/projected/c31e7c21-64fd-4bb2-b165-df1743489363-kube-api-access-5644v\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.279379 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-combined-ca-bundle\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.287569 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c59c8f4-e888-4345-8cdd-5581ef0f801c-logs\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.290098 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-64jmg"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.290235 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c31e7c21-64fd-4bb2-b165-df1743489363-etc-machine-id\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.296297 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.296804 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-scripts\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.300654 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gnxqd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.301053 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.301214 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-config-data\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.302387 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-scripts\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.304907 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-combined-ca-bundle\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.306474 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-config-data\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.312473 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cmjf\" (UniqueName: \"kubernetes.io/projected/2c59c8f4-e888-4345-8cdd-5581ef0f801c-kube-api-access-6cmjf\") pod \"placement-db-sync-d4ccd\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.317105 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-combined-ca-bundle\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.321153 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-db-sync-config-data\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.338985 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cb68t"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.341774 
4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5644v\" (UniqueName: \"kubernetes.io/projected/c31e7c21-64fd-4bb2-b165-df1743489363-kube-api-access-5644v\") pod \"cinder-db-sync-sqhrf\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.342654 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.367805 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-64jmg"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.381630 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jshx4\" (UniqueName: \"kubernetes.io/projected/8fd29da9-c781-4e6d-857c-17f84d72c639-kube-api-access-jshx4\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.381741 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-db-sync-config-data\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.381893 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-combined-ca-bundle\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.389586 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cb68t"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.419462 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-xr25h" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.444590 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.447364 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.450878 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.454387 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.459016 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488547 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488646 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488695 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488738 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jshx4\" (UniqueName: \"kubernetes.io/projected/8fd29da9-c781-4e6d-857c-17f84d72c639-kube-api-access-jshx4\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488824 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-db-sync-config-data\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488877 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-config\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.488931 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.489011 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwjq4\" (UniqueName: \"kubernetes.io/projected/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-kube-api-access-mwjq4\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") 
" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.489057 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-combined-ca-bundle\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.498284 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-combined-ca-bundle\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.499280 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d4ccd" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.502564 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.504182 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-db-sync-config-data\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.521160 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jshx4\" (UniqueName: \"kubernetes.io/projected/8fd29da9-c781-4e6d-857c-17f84d72c639-kube-api-access-jshx4\") pod \"barbican-db-sync-64jmg\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.600415 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-config-data\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.600831 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-run-httpd\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.600881 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwjq4\" (UniqueName: \"kubernetes.io/projected/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-kube-api-access-mwjq4\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.600931 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.600952 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-scripts\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.600992 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601033 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601067 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601100 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxwcz\" (UniqueName: \"kubernetes.io/projected/9c5e102f-3947-4eae-a8de-78bb48d177e9-kube-api-access-nxwcz\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601182 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601226 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-config\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601247 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-log-httpd\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.601296 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.602341 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: 
\"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.603434 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.603893 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.604589 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.609010 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-config\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.618895 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.621369 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.624916 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.627074 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.627932 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-df9ww" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.628292 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-64jmg" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.631810 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwjq4\" (UniqueName: \"kubernetes.io/projected/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-kube-api-access-mwjq4\") pod \"dnsmasq-dns-785d8bcb8c-cb68t\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.646870 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-lhj76"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.674423 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.677483 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710262 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710358 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-log-httpd\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710523 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-config-data\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710751 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-run-httpd\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710817 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-logs\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710883 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710918 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-scripts\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710954 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.710994 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc 
kubenswrapper[4716]: I1209 15:32:19.711027 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-788h8\" (UniqueName: \"kubernetes.io/projected/509237a9-38cc-4268-adeb-2f655ec3da7b-kube-api-access-788h8\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.711061 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-config-data\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.711105 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxwcz\" (UniqueName: \"kubernetes.io/projected/9c5e102f-3947-4eae-a8de-78bb48d177e9-kube-api-access-nxwcz\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.711132 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.711182 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-scripts\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.711611 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-log-httpd\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.719005 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.719446 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-run-httpd\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.725755 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-config-data\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.730605 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-scripts\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.743561 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.754436 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g4z57"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.772753 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.776151 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxwcz\" (UniqueName: \"kubernetes.io/projected/9c5e102f-3947-4eae-a8de-78bb48d177e9-kube-api-access-nxwcz\") pod \"ceilometer-0\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.785632 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.800569 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.802337 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.812398 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813476 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-logs\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813557 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813591 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813609 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-788h8\" (UniqueName: \"kubernetes.io/projected/509237a9-38cc-4268-adeb-2f655ec3da7b-kube-api-access-788h8\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813665 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-config-data\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813707 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.813737 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-scripts\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.815489 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.818131 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-logs\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 
15:32:19.819097 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.827245 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-scripts\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.837056 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.838082 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-788h8\" (UniqueName: \"kubernetes.io/projected/509237a9-38cc-4268-adeb-2f655ec3da7b-kube-api-access-788h8\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.845955 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-config-data\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.912741 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.922714 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.922787 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.922818 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2l9v\" (UniqueName: \"kubernetes.io/projected/f5ebafd4-a555-4f4d-994a-49d65913d92a-kube-api-access-j2l9v\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.922875 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.922946 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.922984 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-logs\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.923027 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:19 crc kubenswrapper[4716]: I1209 15:32:19.979831 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.021110 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-2rwfp"] Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.024577 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.024666 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-logs\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.024726 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.024770 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.024821 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.025041 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2l9v\" (UniqueName: \"kubernetes.io/projected/f5ebafd4-a555-4f4d-994a-49d65913d92a-kube-api-access-j2l9v\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.025106 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.027717 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.028864 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-logs\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.028988 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.034322 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.035592 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.035732 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.056866 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2l9v\" (UniqueName: \"kubernetes.io/projected/f5ebafd4-a555-4f4d-994a-49d65913d92a-kube-api-access-j2l9v\") pod \"glance-default-internal-api-0\" (UID: 
\"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: W1209 15:32:20.082839 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3aba2daa_0fe3_419f_a361_1be829c1e3d0.slice/crio-0046b8ae3f49110f1d74e337b9eb52a787332d8884560f52842f03d9374a22db WatchSource:0}: Error finding container 0046b8ae3f49110f1d74e337b9eb52a787332d8884560f52842f03d9374a22db: Status 404 returned error can't find the container with id 0046b8ae3f49110f1d74e337b9eb52a787332d8884560f52842f03d9374a22db Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.154095 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.328870 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-2rwfp" event={"ID":"3aba2daa-0fe3-419f-a361-1be829c1e3d0","Type":"ContainerStarted","Data":"0046b8ae3f49110f1d74e337b9eb52a787332d8884560f52842f03d9374a22db"} Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.345029 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4z57" event={"ID":"06ef0151-4164-436b-b817-5522ac1b07dd","Type":"ContainerStarted","Data":"e8fe41b78121deea86cfd7f8454f068e656524ee050198dc9c7e992b3d1cac48"} Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.365203 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerName="dnsmasq-dns" containerID="cri-o://c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7" gracePeriod=10 Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.365335 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" event={"ID":"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0","Type":"ContainerStarted","Data":"b2593cb4262a54bb592a1d0f579c96c3cb494ed42d3aee7484e11d3fc60a49df"} Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.412940 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-d4ccd"] Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.456006 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.574017 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-xr25h"] Dec 09 15:32:20 crc kubenswrapper[4716]: I1209 15:32:20.606226 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-sqhrf"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.019988 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-64jmg"] Dec 09 15:32:21 crc kubenswrapper[4716]: W1209 15:32:21.022552 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c5e102f_3947_4eae_a8de_78bb48d177e9.slice/crio-a2f6a9910ea319b5e8ca4374f75b67992edddddd8d0c480d2ad5d957845c4bc2 WatchSource:0}: Error finding container a2f6a9910ea319b5e8ca4374f75b67992edddddd8d0c480d2ad5d957845c4bc2: Status 404 returned error can't find the container with id a2f6a9910ea319b5e8ca4374f75b67992edddddd8d0c480d2ad5d957845c4bc2 Dec 09 15:32:21 crc kubenswrapper[4716]: W1209 15:32:21.038600 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9b49f6e_b7fe_4dd8_81d2_738fd521c166.slice/crio-6d35ede9ba59694d1927d7b7e8fd8892a84ae7e0eb73ecf4283f98726f8ae4ec WatchSource:0}: Error finding container 6d35ede9ba59694d1927d7b7e8fd8892a84ae7e0eb73ecf4283f98726f8ae4ec: Status 404 returned error can't find the container with id 6d35ede9ba59694d1927d7b7e8fd8892a84ae7e0eb73ecf4283f98726f8ae4ec Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.054962 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.105667 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cb68t"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.293746 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.312916 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.389750 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq267\" (UniqueName: \"kubernetes.io/projected/e97c9733-3a49-4920-a32b-7bb6b35786f7-kube-api-access-tq267\") pod \"e97c9733-3a49-4920-a32b-7bb6b35786f7\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.390166 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-svc\") pod \"e97c9733-3a49-4920-a32b-7bb6b35786f7\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.390290 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-swift-storage-0\") pod \"e97c9733-3a49-4920-a32b-7bb6b35786f7\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.390323 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-nb\") pod \"e97c9733-3a49-4920-a32b-7bb6b35786f7\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.390434 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-config\") pod \"e97c9733-3a49-4920-a32b-7bb6b35786f7\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.390509 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-sb\") pod \"e97c9733-3a49-4920-a32b-7bb6b35786f7\" (UID: \"e97c9733-3a49-4920-a32b-7bb6b35786f7\") " Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.406519 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e97c9733-3a49-4920-a32b-7bb6b35786f7-kube-api-access-tq267" (OuterVolumeSpecName: "kube-api-access-tq267") pod "e97c9733-3a49-4920-a32b-7bb6b35786f7" (UID: "e97c9733-3a49-4920-a32b-7bb6b35786f7"). InnerVolumeSpecName "kube-api-access-tq267". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.493978 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq267\" (UniqueName: \"kubernetes.io/projected/e97c9733-3a49-4920-a32b-7bb6b35786f7-kube-api-access-tq267\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.499273 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.512451 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-2rwfp" event={"ID":"3aba2daa-0fe3-419f-a361-1be829c1e3d0","Type":"ContainerStarted","Data":"a98698868ee6063344f074ce17ef56b78cebc9dffd3e4652fa4aacae3cb4e5aa"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.517597 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerStarted","Data":"a2f6a9910ea319b5e8ca4374f75b67992edddddd8d0c480d2ad5d957845c4bc2"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.519576 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"509237a9-38cc-4268-adeb-2f655ec3da7b","Type":"ContainerStarted","Data":"2f22137560541db24bb0ba11efba2ffa2238a17b0343b6b1fb8d264edcdd1390"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.520957 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-64jmg" event={"ID":"8fd29da9-c781-4e6d-857c-17f84d72c639","Type":"ContainerStarted","Data":"eacea93afa4677e3e8be7ef754de20479097de957443f661a6813d08fba41339"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.526802 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-xr25h" event={"ID":"e1fa7713-f67e-45a2-81c0-73a56280f744","Type":"ContainerStarted","Data":"dfac4872f3922d5a0b16ce68f61b7fc2ada3865af3e4d09569530f6d443746fc"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.559676 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" event={"ID":"e9b49f6e-b7fe-4dd8-81d2-738fd521c166","Type":"ContainerStarted","Data":"6d35ede9ba59694d1927d7b7e8fd8892a84ae7e0eb73ecf4283f98726f8ae4ec"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.568063 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-2rwfp" podStartSLOduration=3.56803737 podStartE2EDuration="3.56803737s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:21.544189723 +0000 UTC m=+1428.698933711" watchObservedRunningTime="2025-12-09 15:32:21.56803737 +0000 UTC m=+1428.722781358" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.576310 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4ccd" event={"ID":"2c59c8f4-e888-4345-8cdd-5581ef0f801c","Type":"ContainerStarted","Data":"8d702c4cba67503ba15818e468e4ac3ef52bf36babcb6a91c711ef98ba04b14c"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.581419 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4z57" event={"ID":"06ef0151-4164-436b-b817-5522ac1b07dd","Type":"ContainerStarted","Data":"144be49e7153729fc6b17d2974c938a069e5b19748948b71728ee1a36e2b52a0"} Dec 09 15:32:21 
crc kubenswrapper[4716]: I1209 15:32:21.590479 4716 generic.go:334] "Generic (PLEG): container finished" podID="dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" containerID="d628e166f551b753003499883adcfaeb156e4f8d1122c043780c9c6769ccddbd" exitCode=0 Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.590557 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" event={"ID":"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0","Type":"ContainerDied","Data":"d628e166f551b753003499883adcfaeb156e4f8d1122c043780c9c6769ccddbd"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.591111 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.610903 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqhrf" event={"ID":"c31e7c21-64fd-4bb2-b165-df1743489363","Type":"ContainerStarted","Data":"03eb247dbe197e4cc3e62113bc0166fd2a41a93ed41956ae53b8942655dd7567"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.645673 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.647949 4716 generic.go:334] "Generic (PLEG): container finished" podID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerID="c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7" exitCode=0 Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.648046 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.648062 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" event={"ID":"e97c9733-3a49-4920-a32b-7bb6b35786f7","Type":"ContainerDied","Data":"c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.648387 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-54tdq" event={"ID":"e97c9733-3a49-4920-a32b-7bb6b35786f7","Type":"ContainerDied","Data":"992f744db4e3ea391e9aa0f7b168343561bee425cd7d8492f3606e92dccb0c90"} Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.648412 4716 scope.go:117] "RemoveContainer" containerID="c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.653566 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-g4z57" podStartSLOduration=3.653535534 podStartE2EDuration="3.653535534s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:21.623350514 +0000 UTC m=+1428.778094492" watchObservedRunningTime="2025-12-09 15:32:21.653535534 +0000 UTC m=+1428.808279522" Dec 09 15:32:21 crc kubenswrapper[4716]: W1209 15:32:21.660979 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5ebafd4_a555_4f4d_994a_49d65913d92a.slice/crio-0f71f9ea6bd8a988917caaf71c2ea9984a3ab6477371a2d4bd22db09e2a37cce WatchSource:0}: Error finding container 0f71f9ea6bd8a988917caaf71c2ea9984a3ab6477371a2d4bd22db09e2a37cce: Status 404 returned error can't find the container with id 0f71f9ea6bd8a988917caaf71c2ea9984a3ab6477371a2d4bd22db09e2a37cce Dec 09 
15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.780261 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.847181 4716 scope.go:117] "RemoveContainer" containerID="7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.852794 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e97c9733-3a49-4920-a32b-7bb6b35786f7" (UID: "e97c9733-3a49-4920-a32b-7bb6b35786f7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.876513 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e97c9733-3a49-4920-a32b-7bb6b35786f7" (UID: "e97c9733-3a49-4920-a32b-7bb6b35786f7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.894064 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-config" (OuterVolumeSpecName: "config") pod "e97c9733-3a49-4920-a32b-7bb6b35786f7" (UID: "e97c9733-3a49-4920-a32b-7bb6b35786f7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.910790 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e97c9733-3a49-4920-a32b-7bb6b35786f7" (UID: "e97c9733-3a49-4920-a32b-7bb6b35786f7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.932442 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e97c9733-3a49-4920-a32b-7bb6b35786f7" (UID: "e97c9733-3a49-4920-a32b-7bb6b35786f7"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.934301 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.934332 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.934342 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.934353 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:21 crc kubenswrapper[4716]: I1209 15:32:21.934361 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e97c9733-3a49-4920-a32b-7bb6b35786f7-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.108804 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-54tdq"] Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.135055 4716 scope.go:117] "RemoveContainer" containerID="c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7" Dec 09 15:32:22 crc kubenswrapper[4716]: E1209 15:32:22.136591 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7\": container with ID starting with c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7 not found: ID does not exist" containerID="c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.136676 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7"} err="failed to get container status \"c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7\": rpc error: code = NotFound desc = could not find container \"c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7\": container with ID starting with c30e15dfd2f005b2d84520f4a12046ca139be2d41f0e2a1d675770eb1af36ca7 not found: ID does not exist" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.136717 4716 scope.go:117] "RemoveContainer" containerID="7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6" Dec 09 15:32:22 crc kubenswrapper[4716]: E1209 15:32:22.138028 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6\": container with ID starting with 7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6 not found: ID does not exist" containerID="7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.138066 4716 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6"} err="failed to get container status \"7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6\": rpc error: code = NotFound desc = could not find container \"7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6\": container with ID starting with 7869062ed6b4b26c11d789a8f08f152167eff7a2cf7a80bb50e4a69080caa2f6 not found: ID does not exist" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.144148 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-54tdq"] Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.192466 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.344319 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n95rc\" (UniqueName: \"kubernetes.io/projected/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-kube-api-access-n95rc\") pod \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.344439 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-swift-storage-0\") pod \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.344651 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-nb\") pod \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.344826 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-config\") pod \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.344855 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-svc\") pod \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.344891 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-sb\") pod \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\" (UID: \"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0\") " Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.353655 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-kube-api-access-n95rc" (OuterVolumeSpecName: "kube-api-access-n95rc") pod "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" (UID: "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0"). InnerVolumeSpecName "kube-api-access-n95rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.382723 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-config" (OuterVolumeSpecName: "config") pod "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" (UID: "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.395614 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" (UID: "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.425498 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" (UID: "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.427552 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" (UID: "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.430502 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" (UID: "dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.447760 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.448251 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.448268 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.448281 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n95rc\" (UniqueName: \"kubernetes.io/projected/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-kube-api-access-n95rc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.448293 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.448304 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.744309 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f5ebafd4-a555-4f4d-994a-49d65913d92a","Type":"ContainerStarted","Data":"0f71f9ea6bd8a988917caaf71c2ea9984a3ab6477371a2d4bd22db09e2a37cce"} Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.762403 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" event={"ID":"dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0","Type":"ContainerDied","Data":"b2593cb4262a54bb592a1d0f579c96c3cb494ed42d3aee7484e11d3fc60a49df"} Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.762475 4716 scope.go:117] "RemoveContainer" containerID="d628e166f551b753003499883adcfaeb156e4f8d1122c043780c9c6769ccddbd" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.762682 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-lhj76" Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.804000 4716 generic.go:334] "Generic (PLEG): container finished" podID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerID="96a0697c3bb7cbbe225ae1c8c1188987d1266dc9fd5ff51bd6d38e04e08586a8" exitCode=0 Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.805078 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" event={"ID":"e9b49f6e-b7fe-4dd8-81d2-738fd521c166","Type":"ContainerDied","Data":"96a0697c3bb7cbbe225ae1c8c1188987d1266dc9fd5ff51bd6d38e04e08586a8"} Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.884780 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-lhj76"] Dec 09 15:32:22 crc kubenswrapper[4716]: I1209 15:32:22.920734 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-lhj76"] Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.236396 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" path="/var/lib/kubelet/pods/dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0/volumes" Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.238605 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" path="/var/lib/kubelet/pods/e97c9733-3a49-4920-a32b-7bb6b35786f7/volumes" Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.844720 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" event={"ID":"e9b49f6e-b7fe-4dd8-81d2-738fd521c166","Type":"ContainerStarted","Data":"eefa6dbc722b250a9dcafef33492b3b9d14adef185af81ecbd15a3553c269d65"} Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.845053 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.847071 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f5ebafd4-a555-4f4d-994a-49d65913d92a","Type":"ContainerStarted","Data":"f2ded43c610cad8b86baa411126311d8b93c7cbafa6919c02f0ee3652a9fe1d9"} Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.855074 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"509237a9-38cc-4268-adeb-2f655ec3da7b","Type":"ContainerStarted","Data":"82950591ae088f98c5f5fadd7a267dbb80596c0d8800d2ca1c5dfce7cefd7776"} Dec 09 15:32:23 crc kubenswrapper[4716]: I1209 15:32:23.870481 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" podStartSLOduration=5.87045739 podStartE2EDuration="5.87045739s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:23.867827564 +0000 UTC m=+1431.022571552" watchObservedRunningTime="2025-12-09 15:32:23.87045739 +0000 UTC m=+1431.025201378" Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.874758 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"509237a9-38cc-4268-adeb-2f655ec3da7b","Type":"ContainerStarted","Data":"5a4d39f346c0d90323678333e30f9e18485edd09b01c4e5f58862f529375d9b9"} Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.874918 4716 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-log" containerID="cri-o://82950591ae088f98c5f5fadd7a267dbb80596c0d8800d2ca1c5dfce7cefd7776" gracePeriod=30 Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.875159 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-httpd" containerID="cri-o://5a4d39f346c0d90323678333e30f9e18485edd09b01c4e5f58862f529375d9b9" gracePeriod=30 Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.883315 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f5ebafd4-a555-4f4d-994a-49d65913d92a","Type":"ContainerStarted","Data":"f2acb5f601f833afac23d9f9d226edc76ceb1e8b8d5d7adb47d6c17e30896a74"} Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.883468 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-log" containerID="cri-o://f2ded43c610cad8b86baa411126311d8b93c7cbafa6919c02f0ee3652a9fe1d9" gracePeriod=30 Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.883522 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-httpd" containerID="cri-o://f2acb5f601f833afac23d9f9d226edc76ceb1e8b8d5d7adb47d6c17e30896a74" gracePeriod=30 Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.905874 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.905847941 podStartE2EDuration="6.905847941s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:24.899743945 +0000 UTC m=+1432.054487943" watchObservedRunningTime="2025-12-09 15:32:24.905847941 +0000 UTC m=+1432.060591929" Dec 09 15:32:24 crc kubenswrapper[4716]: I1209 15:32:24.935596 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.935571388 podStartE2EDuration="6.935571388s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:24.93531774 +0000 UTC m=+1432.090061728" watchObservedRunningTime="2025-12-09 15:32:24.935571388 +0000 UTC m=+1432.090315376" Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.924802 4716 generic.go:334] "Generic (PLEG): container finished" podID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerID="5a4d39f346c0d90323678333e30f9e18485edd09b01c4e5f58862f529375d9b9" exitCode=0 Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.925133 4716 generic.go:334] "Generic (PLEG): container finished" podID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerID="82950591ae088f98c5f5fadd7a267dbb80596c0d8800d2ca1c5dfce7cefd7776" exitCode=143 Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.924893 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"509237a9-38cc-4268-adeb-2f655ec3da7b","Type":"ContainerDied","Data":"5a4d39f346c0d90323678333e30f9e18485edd09b01c4e5f58862f529375d9b9"} Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.925248 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"509237a9-38cc-4268-adeb-2f655ec3da7b","Type":"ContainerDied","Data":"82950591ae088f98c5f5fadd7a267dbb80596c0d8800d2ca1c5dfce7cefd7776"} Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.942934 4716 generic.go:334] "Generic (PLEG): container finished" podID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerID="f2acb5f601f833afac23d9f9d226edc76ceb1e8b8d5d7adb47d6c17e30896a74" exitCode=0 Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.942977 4716 generic.go:334] "Generic (PLEG): container finished" podID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerID="f2ded43c610cad8b86baa411126311d8b93c7cbafa6919c02f0ee3652a9fe1d9" exitCode=143 Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.943085 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f5ebafd4-a555-4f4d-994a-49d65913d92a","Type":"ContainerDied","Data":"f2acb5f601f833afac23d9f9d226edc76ceb1e8b8d5d7adb47d6c17e30896a74"} Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.943122 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f5ebafd4-a555-4f4d-994a-49d65913d92a","Type":"ContainerDied","Data":"f2ded43c610cad8b86baa411126311d8b93c7cbafa6919c02f0ee3652a9fe1d9"} Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.954081 4716 generic.go:334] "Generic (PLEG): container finished" podID="06ef0151-4164-436b-b817-5522ac1b07dd" containerID="144be49e7153729fc6b17d2974c938a069e5b19748948b71728ee1a36e2b52a0" exitCode=0 Dec 09 15:32:25 crc kubenswrapper[4716]: I1209 15:32:25.954135 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4z57" event={"ID":"06ef0151-4164-436b-b817-5522ac1b07dd","Type":"ContainerDied","Data":"144be49e7153729fc6b17d2974c938a069e5b19748948b71728ee1a36e2b52a0"} Dec 09 15:32:29 crc kubenswrapper[4716]: I1209 15:32:29.675857 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:32:29 crc kubenswrapper[4716]: I1209 15:32:29.747193 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nwgsw"] Dec 09 15:32:29 crc kubenswrapper[4716]: I1209 15:32:29.747492 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-nwgsw" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" containerID="cri-o://8383510e1ab5a6016d5b78a2724e66be30548fb6416cf327c09930c84aa90b98" gracePeriod=10 Dec 09 15:32:30 crc kubenswrapper[4716]: I1209 15:32:30.031346 4716 generic.go:334] "Generic (PLEG): container finished" podID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerID="8383510e1ab5a6016d5b78a2724e66be30548fb6416cf327c09930c84aa90b98" exitCode=0 Dec 09 15:32:30 crc kubenswrapper[4716]: I1209 15:32:30.031402 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nwgsw" event={"ID":"5e0192ce-fa22-42ad-9c07-5154aa5b3801","Type":"ContainerDied","Data":"8383510e1ab5a6016d5b78a2724e66be30548fb6416cf327c09930c84aa90b98"} Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.629673 4716 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.782083 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-combined-ca-bundle\") pod \"06ef0151-4164-436b-b817-5522ac1b07dd\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.782170 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-config-data\") pod \"06ef0151-4164-436b-b817-5522ac1b07dd\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.782215 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-scripts\") pod \"06ef0151-4164-436b-b817-5522ac1b07dd\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.782293 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-credential-keys\") pod \"06ef0151-4164-436b-b817-5522ac1b07dd\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.782338 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-fernet-keys\") pod \"06ef0151-4164-436b-b817-5522ac1b07dd\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.782382 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vjj8\" (UniqueName: \"kubernetes.io/projected/06ef0151-4164-436b-b817-5522ac1b07dd-kube-api-access-2vjj8\") pod \"06ef0151-4164-436b-b817-5522ac1b07dd\" (UID: \"06ef0151-4164-436b-b817-5522ac1b07dd\") " Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.788990 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "06ef0151-4164-436b-b817-5522ac1b07dd" (UID: "06ef0151-4164-436b-b817-5522ac1b07dd"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.789862 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "06ef0151-4164-436b-b817-5522ac1b07dd" (UID: "06ef0151-4164-436b-b817-5522ac1b07dd"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.790432 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-scripts" (OuterVolumeSpecName: "scripts") pod "06ef0151-4164-436b-b817-5522ac1b07dd" (UID: "06ef0151-4164-436b-b817-5522ac1b07dd"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.790606 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06ef0151-4164-436b-b817-5522ac1b07dd-kube-api-access-2vjj8" (OuterVolumeSpecName: "kube-api-access-2vjj8") pod "06ef0151-4164-436b-b817-5522ac1b07dd" (UID: "06ef0151-4164-436b-b817-5522ac1b07dd"). InnerVolumeSpecName "kube-api-access-2vjj8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.816429 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-config-data" (OuterVolumeSpecName: "config-data") pod "06ef0151-4164-436b-b817-5522ac1b07dd" (UID: "06ef0151-4164-436b-b817-5522ac1b07dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.820083 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06ef0151-4164-436b-b817-5522ac1b07dd" (UID: "06ef0151-4164-436b-b817-5522ac1b07dd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.885646 4716 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.885678 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vjj8\" (UniqueName: \"kubernetes.io/projected/06ef0151-4164-436b-b817-5522ac1b07dd-kube-api-access-2vjj8\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.885691 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.885699 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.885707 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:32 crc kubenswrapper[4716]: I1209 15:32:32.885714 4716 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/06ef0151-4164-436b-b817-5522ac1b07dd-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.087349 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4z57" event={"ID":"06ef0151-4164-436b-b817-5522ac1b07dd","Type":"ContainerDied","Data":"e8fe41b78121deea86cfd7f8454f068e656524ee050198dc9c7e992b3d1cac48"} Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.087398 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8fe41b78121deea86cfd7f8454f068e656524ee050198dc9c7e992b3d1cac48" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 
15:32:33.087898 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4z57" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.815000 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-g4z57"] Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.824823 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-g4z57"] Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.910671 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7rkmg"] Dec 09 15:32:33 crc kubenswrapper[4716]: E1209 15:32:33.911236 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerName="init" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911257 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerName="init" Dec 09 15:32:33 crc kubenswrapper[4716]: E1209 15:32:33.911280 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" containerName="init" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911289 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" containerName="init" Dec 09 15:32:33 crc kubenswrapper[4716]: E1209 15:32:33.911313 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerName="dnsmasq-dns" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911345 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerName="dnsmasq-dns" Dec 09 15:32:33 crc kubenswrapper[4716]: E1209 15:32:33.911361 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ef0151-4164-436b-b817-5522ac1b07dd" containerName="keystone-bootstrap" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911367 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ef0151-4164-436b-b817-5522ac1b07dd" containerName="keystone-bootstrap" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911600 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbd2d0ec-97e3-4b13-8c6e-7e422b48a2a0" containerName="init" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911644 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ef0151-4164-436b-b817-5522ac1b07dd" containerName="keystone-bootstrap" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.911667 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e97c9733-3a49-4920-a32b-7bb6b35786f7" containerName="dnsmasq-dns" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.912522 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.918259 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.918255 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.918564 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kzrzm" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.919010 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.919224 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 15:32:33 crc kubenswrapper[4716]: I1209 15:32:33.925875 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7rkmg"] Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.014171 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqfx9\" (UniqueName: \"kubernetes.io/projected/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-kube-api-access-vqfx9\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.014396 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-credential-keys\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.014430 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-fernet-keys\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.014459 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-combined-ca-bundle\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.014481 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-scripts\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.014888 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-config-data\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.107719 4716 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/dnsmasq-dns-698758b865-nwgsw" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: connect: connection refused" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.117371 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-config-data\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.117453 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqfx9\" (UniqueName: \"kubernetes.io/projected/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-kube-api-access-vqfx9\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.117599 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-credential-keys\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.117653 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-fernet-keys\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.117715 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-combined-ca-bundle\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.117747 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-scripts\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.122655 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-fernet-keys\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.122809 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-scripts\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.123003 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-credential-keys\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " 
pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.124121 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-config-data\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.134554 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-combined-ca-bundle\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.142603 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqfx9\" (UniqueName: \"kubernetes.io/projected/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-kube-api-access-vqfx9\") pod \"keystone-bootstrap-7rkmg\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:34 crc kubenswrapper[4716]: I1209 15:32:34.246963 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:32:35 crc kubenswrapper[4716]: I1209 15:32:35.228549 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06ef0151-4164-436b-b817-5522ac1b07dd" path="/var/lib/kubelet/pods/06ef0151-4164-436b-b817-5522ac1b07dd/volumes" Dec 09 15:32:39 crc kubenswrapper[4716]: I1209 15:32:39.107642 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-nwgsw" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: connect: connection refused" Dec 09 15:32:39 crc kubenswrapper[4716]: I1209 15:32:39.899682 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:39 crc kubenswrapper[4716]: I1209 15:32:39.925359 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.050924 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-scripts\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.050993 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051039 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-httpd-run\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051084 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-logs\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051137 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-logs\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051196 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-combined-ca-bundle\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051284 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-scripts\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051311 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-config-data\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051343 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-combined-ca-bundle\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051363 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2l9v\" (UniqueName: \"kubernetes.io/projected/f5ebafd4-a555-4f4d-994a-49d65913d92a-kube-api-access-j2l9v\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 
15:32:40.051386 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"f5ebafd4-a555-4f4d-994a-49d65913d92a\" (UID: \"f5ebafd4-a555-4f4d-994a-49d65913d92a\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051472 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-config-data\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051487 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-788h8\" (UniqueName: \"kubernetes.io/projected/509237a9-38cc-4268-adeb-2f655ec3da7b-kube-api-access-788h8\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.051604 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-httpd-run\") pod \"509237a9-38cc-4268-adeb-2f655ec3da7b\" (UID: \"509237a9-38cc-4268-adeb-2f655ec3da7b\") " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.052755 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.082934 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-logs" (OuterVolumeSpecName: "logs") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.083369 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.083502 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-logs" (OuterVolumeSpecName: "logs") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.088214 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-scripts" (OuterVolumeSpecName: "scripts") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.089274 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-scripts" (OuterVolumeSpecName: "scripts") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.125780 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.125788 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.137840 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/509237a9-38cc-4268-adeb-2f655ec3da7b-kube-api-access-788h8" (OuterVolumeSpecName: "kube-api-access-788h8") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "kube-api-access-788h8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.152250 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5ebafd4-a555-4f4d-994a-49d65913d92a-kube-api-access-j2l9v" (OuterVolumeSpecName: "kube-api-access-j2l9v") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "kube-api-access-j2l9v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157139 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2l9v\" (UniqueName: \"kubernetes.io/projected/f5ebafd4-a555-4f4d-994a-49d65913d92a-kube-api-access-j2l9v\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157197 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157213 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-788h8\" (UniqueName: \"kubernetes.io/projected/509237a9-38cc-4268-adeb-2f655ec3da7b-kube-api-access-788h8\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157403 4716 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157420 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157441 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157453 4716 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157465 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ebafd4-a555-4f4d-994a-49d65913d92a-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157476 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/509237a9-38cc-4268-adeb-2f655ec3da7b-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.157486 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.204248 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"509237a9-38cc-4268-adeb-2f655ec3da7b","Type":"ContainerDied","Data":"2f22137560541db24bb0ba11efba2ffa2238a17b0343b6b1fb8d264edcdd1390"} Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.204393 4716 scope.go:117] "RemoveContainer" containerID="5a4d39f346c0d90323678333e30f9e18485edd09b01c4e5f58862f529375d9b9" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.204571 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.214468 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.223008 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.228589 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f5ebafd4-a555-4f4d-994a-49d65913d92a","Type":"ContainerDied","Data":"0f71f9ea6bd8a988917caaf71c2ea9984a3ab6477371a2d4bd22db09e2a37cce"} Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.228711 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.236584 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.244045 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.259943 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.260005 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.260025 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.260036 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.262560 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-config-data" (OuterVolumeSpecName: "config-data") pod "509237a9-38cc-4268-adeb-2f655ec3da7b" (UID: "509237a9-38cc-4268-adeb-2f655ec3da7b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.267227 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-config-data" (OuterVolumeSpecName: "config-data") pod "f5ebafd4-a555-4f4d-994a-49d65913d92a" (UID: "f5ebafd4-a555-4f4d-994a-49d65913d92a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.362229 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ebafd4-a555-4f4d-994a-49d65913d92a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.362271 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/509237a9-38cc-4268-adeb-2f655ec3da7b-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.552037 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.563468 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.592740 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.612743 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.626260 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: E1209 15:32:40.626831 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-httpd" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.626852 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-httpd" Dec 09 15:32:40 crc kubenswrapper[4716]: E1209 15:32:40.626876 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-httpd" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.626883 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-httpd" Dec 09 15:32:40 crc kubenswrapper[4716]: E1209 15:32:40.626913 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-log" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.626919 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-log" Dec 09 15:32:40 crc kubenswrapper[4716]: E1209 15:32:40.626930 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-log" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.626936 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-log" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.627152 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" 
containerName="glance-httpd" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.627169 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-log" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.627180 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" containerName="glance-httpd" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.627193 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" containerName="glance-log" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.628371 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.634143 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.634203 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.634402 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-df9ww" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.634498 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.641645 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.659927 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.661750 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.668133 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.668334 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670565 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-config-data\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670602 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-logs\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670650 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670694 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670805 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-scripts\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670838 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxrxc\" (UniqueName: \"kubernetes.io/projected/381de784-1e86-405c-84a0-169fd93d2ef2-kube-api-access-xxrxc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670896 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.670980 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-httpd-run\") pod \"glance-default-external-api-0\" 
(UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.686590 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773230 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773336 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-logs\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773382 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773417 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-scripts\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773440 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxrxc\" (UniqueName: \"kubernetes.io/projected/381de784-1e86-405c-84a0-169fd93d2ef2-kube-api-access-xxrxc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773499 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773535 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzqqq\" (UniqueName: \"kubernetes.io/projected/c0b9015f-858a-47f4-b619-a47e54c09d03-kube-api-access-hzqqq\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773588 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773644 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773676 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773715 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-config-data\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773742 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-logs\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773771 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773804 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773828 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.773863 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.774608 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.775178 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.775227 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-logs\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.781932 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-scripts\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.782536 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.783314 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.797808 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxrxc\" (UniqueName: \"kubernetes.io/projected/381de784-1e86-405c-84a0-169fd93d2ef2-kube-api-access-xxrxc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.798522 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-config-data\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.817011 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.877373 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.878052 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-config-data\") pod \"glance-default-internal-api-0\" 
(UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.878233 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.878363 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.878480 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.878713 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-logs\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.878842 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.879031 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzqqq\" (UniqueName: \"kubernetes.io/projected/c0b9015f-858a-47f4-b619-a47e54c09d03-kube-api-access-hzqqq\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.881812 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.881979 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-logs\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.882218 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc 
kubenswrapper[4716]: I1209 15:32:40.884173 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.886562 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.887357 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.901969 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzqqq\" (UniqueName: \"kubernetes.io/projected/c0b9015f-858a-47f4-b619-a47e54c09d03-kube-api-access-hzqqq\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.905609 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.922798 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") " pod="openstack/glance-default-internal-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.950506 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:32:40 crc kubenswrapper[4716]: I1209 15:32:40.992058 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 09 15:32:41 crc kubenswrapper[4716]: I1209 15:32:41.230946 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="509237a9-38cc-4268-adeb-2f655ec3da7b" path="/var/lib/kubelet/pods/509237a9-38cc-4268-adeb-2f655ec3da7b/volumes" Dec 09 15:32:41 crc kubenswrapper[4716]: I1209 15:32:41.231894 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5ebafd4-a555-4f4d-994a-49d65913d92a" path="/var/lib/kubelet/pods/f5ebafd4-a555-4f4d-994a-49d65913d92a/volumes" Dec 09 15:32:43 crc kubenswrapper[4716]: I1209 15:32:43.271072 4716 generic.go:334] "Generic (PLEG): container finished" podID="3aba2daa-0fe3-419f-a361-1be829c1e3d0" containerID="a98698868ee6063344f074ce17ef56b78cebc9dffd3e4652fa4aacae3cb4e5aa" exitCode=0 Dec 09 15:32:43 crc kubenswrapper[4716]: I1209 15:32:43.271148 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-2rwfp" event={"ID":"3aba2daa-0fe3-419f-a361-1be829c1e3d0","Type":"ContainerDied","Data":"a98698868ee6063344f074ce17ef56b78cebc9dffd3e4652fa4aacae3cb4e5aa"} Dec 09 15:32:47 crc kubenswrapper[4716]: I1209 15:32:47.317953 4716 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2c70bfd2_db44_4c93_ba2f_0e0bbaf3f280.slice" Dec 09 15:32:47 crc kubenswrapper[4716]: E1209 15:32:47.318489 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2c70bfd2_db44_4c93_ba2f_0e0bbaf3f280.slice" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" Dec 09 15:32:47 crc kubenswrapper[4716]: I1209 15:32:47.922051 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:32:47 crc kubenswrapper[4716]: I1209 15:32:47.922127 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.162655 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.162892 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2qchn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-xr25h_openstack(e1fa7713-f67e-45a2-81c0-73a56280f744): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.164035 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-xr25h" podUID="e1fa7713-f67e-45a2-81c0-73a56280f744" Dec 09 15:32:48 crc kubenswrapper[4716]: I1209 15:32:48.324933 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-ww9d6" Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.329181 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-xr25h" podUID="e1fa7713-f67e-45a2-81c0-73a56280f744" Dec 09 15:32:48 crc kubenswrapper[4716]: I1209 15:32:48.388371 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-ww9d6"] Dec 09 15:32:48 crc kubenswrapper[4716]: I1209 15:32:48.400819 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-ww9d6"] Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.719082 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.719265 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jshx4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-64jmg_openstack(8fd29da9-c781-4e6d-857c-17f84d72c639): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:32:48 crc kubenswrapper[4716]: E1209 15:32:48.720874 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-64jmg" podUID="8fd29da9-c781-4e6d-857c-17f84d72c639" Dec 09 15:32:49 crc kubenswrapper[4716]: I1209 15:32:49.107844 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-nwgsw" 
podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Dec 09 15:32:49 crc kubenswrapper[4716]: I1209 15:32:49.108589 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:32:49 crc kubenswrapper[4716]: I1209 15:32:49.230259 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280" path="/var/lib/kubelet/pods/2c70bfd2-db44-4c93-ba2f-0e0bbaf3f280/volumes" Dec 09 15:32:49 crc kubenswrapper[4716]: E1209 15:32:49.341497 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-64jmg" podUID="8fd29da9-c781-4e6d-857c-17f84d72c639" Dec 09 15:32:54 crc kubenswrapper[4716]: I1209 15:32:54.108998 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-nwgsw" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.418999 4716 scope.go:117] "RemoveContainer" containerID="82950591ae088f98c5f5fadd7a267dbb80596c0d8800d2ca1c5dfce7cefd7776" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.423186 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-nwgsw" event={"ID":"5e0192ce-fa22-42ad-9c07-5154aa5b3801","Type":"ContainerDied","Data":"70ceb9de9d224e1acc5b4ab6f7fa8104ac749502830c495198e31caba5c077f2"} Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.423243 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70ceb9de9d224e1acc5b4ab6f7fa8104ac749502830c495198e31caba5c077f2" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.425902 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-2rwfp" event={"ID":"3aba2daa-0fe3-419f-a361-1be829c1e3d0","Type":"ContainerDied","Data":"0046b8ae3f49110f1d74e337b9eb52a787332d8884560f52842f03d9374a22db"} Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.425969 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0046b8ae3f49110f1d74e337b9eb52a787332d8884560f52842f03d9374a22db" Dec 09 15:32:57 crc kubenswrapper[4716]: E1209 15:32:57.494895 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 09 15:32:57 crc kubenswrapper[4716]: E1209 15:32:57.495076 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5644v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-sqhrf_openstack(c31e7c21-64fd-4bb2-b165-df1743489363): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:32:57 crc kubenswrapper[4716]: E1209 15:32:57.496336 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-sqhrf" podUID="c31e7c21-64fd-4bb2-b165-df1743489363" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.696878 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.704003 4716 scope.go:117] "RemoveContainer" containerID="f2acb5f601f833afac23d9f9d226edc76ceb1e8b8d5d7adb47d6c17e30896a74" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.741356 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.755197 4716 scope.go:117] "RemoveContainer" containerID="f2ded43c610cad8b86baa411126311d8b93c7cbafa6919c02f0ee3652a9fe1d9" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784222 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d75v\" (UniqueName: \"kubernetes.io/projected/5e0192ce-fa22-42ad-9c07-5154aa5b3801-kube-api-access-8d75v\") pod \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784294 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-sb\") pod \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784367 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config\") pod \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784466 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-nb\") pod \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784544 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-dns-svc\") pod \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784590 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-config\") pod \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\" (UID: \"5e0192ce-fa22-42ad-9c07-5154aa5b3801\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784644 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vqb4\" (UniqueName: \"kubernetes.io/projected/3aba2daa-0fe3-419f-a361-1be829c1e3d0-kube-api-access-2vqb4\") pod \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.784675 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-combined-ca-bundle\") pod \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.808224 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3aba2daa-0fe3-419f-a361-1be829c1e3d0-kube-api-access-2vqb4" (OuterVolumeSpecName: "kube-api-access-2vqb4") pod "3aba2daa-0fe3-419f-a361-1be829c1e3d0" (UID: "3aba2daa-0fe3-419f-a361-1be829c1e3d0"). InnerVolumeSpecName "kube-api-access-2vqb4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.813902 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e0192ce-fa22-42ad-9c07-5154aa5b3801-kube-api-access-8d75v" (OuterVolumeSpecName: "kube-api-access-8d75v") pod "5e0192ce-fa22-42ad-9c07-5154aa5b3801" (UID: "5e0192ce-fa22-42ad-9c07-5154aa5b3801"). InnerVolumeSpecName "kube-api-access-8d75v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.886045 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config" (OuterVolumeSpecName: "config") pod "3aba2daa-0fe3-419f-a361-1be829c1e3d0" (UID: "3aba2daa-0fe3-419f-a361-1be829c1e3d0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.886769 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config\") pod \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\" (UID: \"3aba2daa-0fe3-419f-a361-1be829c1e3d0\") " Dec 09 15:32:57 crc kubenswrapper[4716]: W1209 15:32:57.887230 4716 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/3aba2daa-0fe3-419f-a361-1be829c1e3d0/volumes/kubernetes.io~secret/config Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.887679 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config" (OuterVolumeSpecName: "config") pod "3aba2daa-0fe3-419f-a361-1be829c1e3d0" (UID: "3aba2daa-0fe3-419f-a361-1be829c1e3d0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.887687 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vqb4\" (UniqueName: \"kubernetes.io/projected/3aba2daa-0fe3-419f-a361-1be829c1e3d0-kube-api-access-2vqb4\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.887736 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d75v\" (UniqueName: \"kubernetes.io/projected/5e0192ce-fa22-42ad-9c07-5154aa5b3801-kube-api-access-8d75v\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.890441 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5e0192ce-fa22-42ad-9c07-5154aa5b3801" (UID: "5e0192ce-fa22-42ad-9c07-5154aa5b3801"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.896903 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3aba2daa-0fe3-419f-a361-1be829c1e3d0" (UID: "3aba2daa-0fe3-419f-a361-1be829c1e3d0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.929307 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5e0192ce-fa22-42ad-9c07-5154aa5b3801" (UID: "5e0192ce-fa22-42ad-9c07-5154aa5b3801"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.933202 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5e0192ce-fa22-42ad-9c07-5154aa5b3801" (UID: "5e0192ce-fa22-42ad-9c07-5154aa5b3801"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.943057 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-config" (OuterVolumeSpecName: "config") pod "5e0192ce-fa22-42ad-9c07-5154aa5b3801" (UID: "5e0192ce-fa22-42ad-9c07-5154aa5b3801"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.973135 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7rkmg"] Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.991379 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.991421 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.991434 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.991445 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.991457 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5e0192ce-fa22-42ad-9c07-5154aa5b3801-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:57 crc kubenswrapper[4716]: I1209 15:32:57.991468 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aba2daa-0fe3-419f-a361-1be829c1e3d0-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.142939 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.279948 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.472337 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-7rkmg" event={"ID":"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738","Type":"ContainerStarted","Data":"ae8761e7087d4c45bc84a019ea59e6f13963ed8ab357ecf369467de782ebdc56"} Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.472392 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7rkmg" event={"ID":"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738","Type":"ContainerStarted","Data":"f0339704c1566cf5fe3e24323d3aae4dc64cee8fb99309372d066265b5ded410"} Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.475090 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4ccd" event={"ID":"2c59c8f4-e888-4345-8cdd-5581ef0f801c","Type":"ContainerStarted","Data":"7581187e2cfe62902b026e949dc19004b041c21d3ec7d4fa8841f43d63de9847"} Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.485750 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerStarted","Data":"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343"} Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.492331 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"381de784-1e86-405c-84a0-169fd93d2ef2","Type":"ContainerStarted","Data":"64706d0a23ca94b52a9cae99bcfe6bf65826545b4dd6b70b66eef90ff19414cf"} Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.496545 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c0b9015f-858a-47f4-b619-a47e54c09d03","Type":"ContainerStarted","Data":"9145934aa78d71cdc94b5cda9a8287a5e7c863d6195f4fd7d792c252ef090cfc"} Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.496598 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-2rwfp" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.496612 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-nwgsw" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.500935 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7rkmg" podStartSLOduration=25.500914824 podStartE2EDuration="25.500914824s" podCreationTimestamp="2025-12-09 15:32:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:32:58.492841231 +0000 UTC m=+1465.647585219" watchObservedRunningTime="2025-12-09 15:32:58.500914824 +0000 UTC m=+1465.655658812" Dec 09 15:32:58 crc kubenswrapper[4716]: E1209 15:32:58.500956 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-sqhrf" podUID="c31e7c21-64fd-4bb2-b165-df1743489363" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.580903 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-d4ccd" podStartSLOduration=12.310134448 podStartE2EDuration="40.580877588s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="2025-12-09 15:32:20.440797662 +0000 UTC m=+1427.595541650" lastFinishedPulling="2025-12-09 15:32:48.711540802 +0000 UTC m=+1455.866284790" observedRunningTime="2025-12-09 15:32:58.514485705 +0000 UTC m=+1465.669229703" watchObservedRunningTime="2025-12-09 15:32:58.580877588 +0000 UTC m=+1465.735621576" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.612766 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nwgsw"] Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.625286 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-nwgsw"] Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.905777 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-dj8vd"] Dec 09 15:32:58 crc kubenswrapper[4716]: E1209 15:32:58.906268 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.906282 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" Dec 09 15:32:58 crc kubenswrapper[4716]: E1209 15:32:58.906308 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aba2daa-0fe3-419f-a361-1be829c1e3d0" containerName="neutron-db-sync" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.906314 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aba2daa-0fe3-419f-a361-1be829c1e3d0" containerName="neutron-db-sync" Dec 09 15:32:58 crc kubenswrapper[4716]: E1209 15:32:58.906339 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="init" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.906345 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="init" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.906651 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 
15:32:58.906694 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aba2daa-0fe3-419f-a361-1be829c1e3d0" containerName="neutron-db-sync" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.908492 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:58 crc kubenswrapper[4716]: I1209 15:32:58.971702 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-dj8vd"] Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.058032 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-f86fdcb78-t5sms"] Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.061827 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.068585 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.068945 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.069071 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071033 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-config\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071229 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zzw2\" (UniqueName: \"kubernetes.io/projected/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-kube-api-access-6zzw2\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071292 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071327 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071371 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071414 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-svc\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.071744 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-6rrkk" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.082049 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f86fdcb78-t5sms"] Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.115052 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-nwgsw" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173193 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-svc\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173260 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-combined-ca-bundle\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173285 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-config\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173394 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qknbb\" (UniqueName: \"kubernetes.io/projected/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-kube-api-access-qknbb\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173430 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-httpd-config\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173460 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zzw2\" (UniqueName: \"kubernetes.io/projected/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-kube-api-access-6zzw2\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173507 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: 
\"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173528 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-config\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173545 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-ovndb-tls-certs\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.173596 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.176686 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-svc\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.177255 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-config\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.178135 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.191854 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.196903 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 
crc kubenswrapper[4716]: I1209 15:32:59.211757 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zzw2\" (UniqueName: \"kubernetes.io/projected/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-kube-api-access-6zzw2\") pod \"dnsmasq-dns-55f844cf75-dj8vd\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.246290 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e0192ce-fa22-42ad-9c07-5154aa5b3801" path="/var/lib/kubelet/pods/5e0192ce-fa22-42ad-9c07-5154aa5b3801/volumes" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.267356 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.277189 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qknbb\" (UniqueName: \"kubernetes.io/projected/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-kube-api-access-qknbb\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.277271 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-httpd-config\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.277368 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-config\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.277412 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-ovndb-tls-certs\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.277486 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-combined-ca-bundle\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.289406 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-httpd-config\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.293013 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-ovndb-tls-certs\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.295302 4716 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-combined-ca-bundle\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.307016 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-config\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.307397 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qknbb\" (UniqueName: \"kubernetes.io/projected/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-kube-api-access-qknbb\") pod \"neutron-f86fdcb78-t5sms\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.452971 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.534748 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c0b9015f-858a-47f4-b619-a47e54c09d03","Type":"ContainerStarted","Data":"dff61c3808e480eb5344a9679d346371a6ed83e46e414d3b1288cb4d60a7b29a"} Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.562623 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"381de784-1e86-405c-84a0-169fd93d2ef2","Type":"ContainerStarted","Data":"d6a42dcf537008f4f39a0f551b4a0d479fc86d2c5cf5f63a92c092513609ccf6"} Dec 09 15:32:59 crc kubenswrapper[4716]: I1209 15:32:59.990956 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-dj8vd"] Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.225849 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-f86fdcb78-t5sms"] Dec 09 15:33:00 crc kubenswrapper[4716]: W1209 15:33:00.451150 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ed04c3_9bf0_4ff4_81b7_5844412f66ca.slice/crio-adf3f2aff71e6f37745e3022e3915edd0c04a9c35a07710820aaaf2a9e6232a5 WatchSource:0}: Error finding container adf3f2aff71e6f37745e3022e3915edd0c04a9c35a07710820aaaf2a9e6232a5: Status 404 returned error can't find the container with id adf3f2aff71e6f37745e3022e3915edd0c04a9c35a07710820aaaf2a9e6232a5 Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.595299 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"381de784-1e86-405c-84a0-169fd93d2ef2","Type":"ContainerStarted","Data":"e0b3bce6e4f911d56460e7e6c57e92cb0818375ba9c445e1cb2d33a6fcc8253c"} Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.599010 4716 generic.go:334] "Generic (PLEG): container finished" podID="2c59c8f4-e888-4345-8cdd-5581ef0f801c" containerID="7581187e2cfe62902b026e949dc19004b041c21d3ec7d4fa8841f43d63de9847" exitCode=0 Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.599110 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4ccd" event={"ID":"2c59c8f4-e888-4345-8cdd-5581ef0f801c","Type":"ContainerDied","Data":"7581187e2cfe62902b026e949dc19004b041c21d3ec7d4fa8841f43d63de9847"} 
Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.602147 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" event={"ID":"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220","Type":"ContainerStarted","Data":"5b7611748b987b4600381e41d7be69122a369823c73ce750a84116e6afd22cc5"} Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.615673 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f86fdcb78-t5sms" event={"ID":"77ed04c3-9bf0-4ff4-81b7-5844412f66ca","Type":"ContainerStarted","Data":"adf3f2aff71e6f37745e3022e3915edd0c04a9c35a07710820aaaf2a9e6232a5"} Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.661563 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c0b9015f-858a-47f4-b619-a47e54c09d03","Type":"ContainerStarted","Data":"f9b61364f0ef1deff6be397a1c18bbaf24bc9a0bbec192df0cc269de6d21535b"} Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.661665 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=20.661619319 podStartE2EDuration="20.661619319s" podCreationTimestamp="2025-12-09 15:32:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:00.624858149 +0000 UTC m=+1467.779602137" watchObservedRunningTime="2025-12-09 15:33:00.661619319 +0000 UTC m=+1467.816363307" Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.723616 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=20.723528323 podStartE2EDuration="20.723528323s" podCreationTimestamp="2025-12-09 15:32:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:00.689810141 +0000 UTC m=+1467.844554139" watchObservedRunningTime="2025-12-09 15:33:00.723528323 +0000 UTC m=+1467.878272321" Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.951637 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.952037 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.993139 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:00 crc kubenswrapper[4716]: I1209 15:33:00.993188 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.013239 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.042290 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.083743 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.121445 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/glance-default-internal-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.679967 4716 generic.go:334] "Generic (PLEG): container finished" podID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerID="968a76c56a7d236e6f73e80d519edca7bc7ca4896ac8c37f033ee084876122f9" exitCode=0 Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.680378 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" event={"ID":"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220","Type":"ContainerDied","Data":"968a76c56a7d236e6f73e80d519edca7bc7ca4896ac8c37f033ee084876122f9"} Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.684858 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerStarted","Data":"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f"} Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.692329 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f86fdcb78-t5sms" event={"ID":"77ed04c3-9bf0-4ff4-81b7-5844412f66ca","Type":"ContainerStarted","Data":"373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8"} Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.692396 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f86fdcb78-t5sms" event={"ID":"77ed04c3-9bf0-4ff4-81b7-5844412f66ca","Type":"ContainerStarted","Data":"24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24"} Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.692420 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.692468 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.692566 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.694943 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.694973 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.746823 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-f86fdcb78-t5sms" podStartSLOduration=2.746793294 podStartE2EDuration="2.746793294s" podCreationTimestamp="2025-12-09 15:32:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:01.726333874 +0000 UTC m=+1468.881077862" watchObservedRunningTime="2025-12-09 15:33:01.746793294 +0000 UTC m=+1468.901537292" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.883312 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-67d7f5448f-zcfm9"] Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.889074 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.892773 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.892956 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Dec 09 15:33:01 crc kubenswrapper[4716]: I1209 15:33:01.905075 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67d7f5448f-zcfm9"] Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004004 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-httpd-config\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004165 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-combined-ca-bundle\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004238 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkv6x\" (UniqueName: \"kubernetes.io/projected/9d36793d-f026-47a5-a21f-1272aa1dbf58-kube-api-access-dkv6x\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004294 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-config\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004361 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-internal-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004517 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-public-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.004587 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-ovndb-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.106595 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkv6x\" (UniqueName: 
\"kubernetes.io/projected/9d36793d-f026-47a5-a21f-1272aa1dbf58-kube-api-access-dkv6x\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.107013 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-config\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.107057 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-internal-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.107120 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-public-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.107157 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-ovndb-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.107220 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-httpd-config\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.107286 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-combined-ca-bundle\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.146035 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-config\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.146800 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-combined-ca-bundle\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.149999 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-public-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " 
pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.153093 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-internal-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.156577 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-httpd-config\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.157016 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkv6x\" (UniqueName: \"kubernetes.io/projected/9d36793d-f026-47a5-a21f-1272aa1dbf58-kube-api-access-dkv6x\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.157868 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d36793d-f026-47a5-a21f-1272aa1dbf58-ovndb-tls-certs\") pod \"neutron-67d7f5448f-zcfm9\" (UID: \"9d36793d-f026-47a5-a21f-1272aa1dbf58\") " pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.179414 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d4ccd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.310411 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-config-data\") pod \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.310551 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-scripts\") pod \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.310668 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c59c8f4-e888-4345-8cdd-5581ef0f801c-logs\") pod \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.310703 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-combined-ca-bundle\") pod \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.310828 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cmjf\" (UniqueName: \"kubernetes.io/projected/2c59c8f4-e888-4345-8cdd-5581ef0f801c-kube-api-access-6cmjf\") pod \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\" (UID: \"2c59c8f4-e888-4345-8cdd-5581ef0f801c\") " Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.312997 4716 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c59c8f4-e888-4345-8cdd-5581ef0f801c-logs" (OuterVolumeSpecName: "logs") pod "2c59c8f4-e888-4345-8cdd-5581ef0f801c" (UID: "2c59c8f4-e888-4345-8cdd-5581ef0f801c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.314609 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c59c8f4-e888-4345-8cdd-5581ef0f801c-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.316487 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c59c8f4-e888-4345-8cdd-5581ef0f801c-kube-api-access-6cmjf" (OuterVolumeSpecName: "kube-api-access-6cmjf") pod "2c59c8f4-e888-4345-8cdd-5581ef0f801c" (UID: "2c59c8f4-e888-4345-8cdd-5581ef0f801c"). InnerVolumeSpecName "kube-api-access-6cmjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.317732 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-scripts" (OuterVolumeSpecName: "scripts") pod "2c59c8f4-e888-4345-8cdd-5581ef0f801c" (UID: "2c59c8f4-e888-4345-8cdd-5581ef0f801c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.342315 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.345880 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c59c8f4-e888-4345-8cdd-5581ef0f801c" (UID: "2c59c8f4-e888-4345-8cdd-5581ef0f801c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.355078 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-config-data" (OuterVolumeSpecName: "config-data") pod "2c59c8f4-e888-4345-8cdd-5581ef0f801c" (UID: "2c59c8f4-e888-4345-8cdd-5581ef0f801c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.426249 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cmjf\" (UniqueName: \"kubernetes.io/projected/2c59c8f4-e888-4345-8cdd-5581ef0f801c-kube-api-access-6cmjf\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.426330 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.426348 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.426370 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c59c8f4-e888-4345-8cdd-5581ef0f801c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.727883 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d4ccd" event={"ID":"2c59c8f4-e888-4345-8cdd-5581ef0f801c","Type":"ContainerDied","Data":"8d702c4cba67503ba15818e468e4ac3ef52bf36babcb6a91c711ef98ba04b14c"} Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.728898 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d702c4cba67503ba15818e468e4ac3ef52bf36babcb6a91c711ef98ba04b14c" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.729122 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d4ccd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.851108 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-864d657bcd-fb9jd"] Dec 09 15:33:02 crc kubenswrapper[4716]: E1209 15:33:02.851657 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c59c8f4-e888-4345-8cdd-5581ef0f801c" containerName="placement-db-sync" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.851670 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c59c8f4-e888-4345-8cdd-5581ef0f801c" containerName="placement-db-sync" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.851948 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c59c8f4-e888-4345-8cdd-5581ef0f801c" containerName="placement-db-sync" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.853440 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.856922 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.857200 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.857589 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.857641 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-flrvf" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.857812 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.868275 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-864d657bcd-fb9jd"] Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947489 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-public-tls-certs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947547 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-internal-tls-certs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947676 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-config-data\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947712 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-scripts\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947736 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1bd96021-5850-4f28-b8e7-666617201e0e-logs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947822 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l79m7\" (UniqueName: \"kubernetes.io/projected/1bd96021-5850-4f28-b8e7-666617201e0e-kube-api-access-l79m7\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:02 crc kubenswrapper[4716]: I1209 15:33:02.947945 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-combined-ca-bundle\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.047715 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67d7f5448f-zcfm9"] Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.057101 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-scripts\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.057210 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1bd96021-5850-4f28-b8e7-666617201e0e-logs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.057387 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l79m7\" (UniqueName: \"kubernetes.io/projected/1bd96021-5850-4f28-b8e7-666617201e0e-kube-api-access-l79m7\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.057672 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-combined-ca-bundle\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.058076 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-public-tls-certs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.058104 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-internal-tls-certs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.058228 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-config-data\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.067051 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1bd96021-5850-4f28-b8e7-666617201e0e-logs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc 
kubenswrapper[4716]: I1209 15:33:03.092081 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-config-data\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.093419 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-public-tls-certs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.105307 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l79m7\" (UniqueName: \"kubernetes.io/projected/1bd96021-5850-4f28-b8e7-666617201e0e-kube-api-access-l79m7\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.105845 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-internal-tls-certs\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.118213 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-scripts\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.124874 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bd96021-5850-4f28-b8e7-666617201e0e-combined-ca-bundle\") pod \"placement-864d657bcd-fb9jd\" (UID: \"1bd96021-5850-4f28-b8e7-666617201e0e\") " pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.193309 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.787387 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-xr25h" event={"ID":"e1fa7713-f67e-45a2-81c0-73a56280f744","Type":"ContainerStarted","Data":"c2fa73a1329bbedc77ddddf004e0c6c1f8f493e99de0f806acb9cf4f307e172c"} Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.795062 4716 generic.go:334] "Generic (PLEG): container finished" podID="ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" containerID="ae8761e7087d4c45bc84a019ea59e6f13963ed8ab357ecf369467de782ebdc56" exitCode=0 Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.795122 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7rkmg" event={"ID":"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738","Type":"ContainerDied","Data":"ae8761e7087d4c45bc84a019ea59e6f13963ed8ab357ecf369467de782ebdc56"} Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.808587 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" event={"ID":"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220","Type":"ContainerStarted","Data":"e6a2c0a56133fcd219b5b65d84f7a3281373d6bc8413978af0a990c3d108b608"} Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.809902 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.815956 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67d7f5448f-zcfm9" event={"ID":"9d36793d-f026-47a5-a21f-1272aa1dbf58","Type":"ContainerStarted","Data":"4afe5888c91e816e0d5e315ca9bb5cf0299af7dc8cdd8b7968b7a3c8bc6b3f86"} Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.815995 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67d7f5448f-zcfm9" event={"ID":"9d36793d-f026-47a5-a21f-1272aa1dbf58","Type":"ContainerStarted","Data":"9fd902711be3ddfddb47396b01c98dda15857089f942cafd1fffed3a44e825e4"} Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.831914 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-xr25h" podStartSLOduration=4.6981479329999996 podStartE2EDuration="45.8318853s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="2025-12-09 15:32:20.759788406 +0000 UTC m=+1427.914532394" lastFinishedPulling="2025-12-09 15:33:01.893525773 +0000 UTC m=+1469.048269761" observedRunningTime="2025-12-09 15:33:03.806250531 +0000 UTC m=+1470.960994519" watchObservedRunningTime="2025-12-09 15:33:03.8318853 +0000 UTC m=+1470.986629288" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.863608 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" podStartSLOduration=5.8635844630000005 podStartE2EDuration="5.863584463s" podCreationTimestamp="2025-12-09 15:32:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:03.852306618 +0000 UTC m=+1471.007050606" watchObservedRunningTime="2025-12-09 15:33:03.863584463 +0000 UTC m=+1471.018328451" Dec 09 15:33:03 crc kubenswrapper[4716]: I1209 15:33:03.919775 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-864d657bcd-fb9jd"] Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.837129 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-67d7f5448f-zcfm9" event={"ID":"9d36793d-f026-47a5-a21f-1272aa1dbf58","Type":"ContainerStarted","Data":"b4028dddb549c051a645e4efa2965a1633d074f9c3361f9a7f57a16528968e27"} Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.837673 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.843320 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-864d657bcd-fb9jd" event={"ID":"1bd96021-5850-4f28-b8e7-666617201e0e","Type":"ContainerStarted","Data":"25269f6f76f37290fefd8241671f204be05d591f887f084fcdb86c51bb730d15"} Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.843368 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-864d657bcd-fb9jd" event={"ID":"1bd96021-5850-4f28-b8e7-666617201e0e","Type":"ContainerStarted","Data":"960cd68c887571b7cb10043e33a7b6c7235623fb08d06fcd03b12f06a8633dd9"} Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.843388 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.843401 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.843409 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-864d657bcd-fb9jd" event={"ID":"1bd96021-5850-4f28-b8e7-666617201e0e","Type":"ContainerStarted","Data":"05dc8b64bde03eec3d2616920833d1fe98fbb3a2289a2fd20d11a8b324ee6e39"} Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.874597 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-67d7f5448f-zcfm9" podStartSLOduration=3.87456748 podStartE2EDuration="3.87456748s" podCreationTimestamp="2025-12-09 15:33:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:04.867361103 +0000 UTC m=+1472.022105111" watchObservedRunningTime="2025-12-09 15:33:04.87456748 +0000 UTC m=+1472.029311478" Dec 09 15:33:04 crc kubenswrapper[4716]: I1209 15:33:04.904721 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-864d657bcd-fb9jd" podStartSLOduration=2.904695079 podStartE2EDuration="2.904695079s" podCreationTimestamp="2025-12-09 15:33:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:04.893110795 +0000 UTC m=+1472.047854783" watchObservedRunningTime="2025-12-09 15:33:04.904695079 +0000 UTC m=+1472.059439067" Dec 09 15:33:05 crc kubenswrapper[4716]: I1209 15:33:05.856641 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-64jmg" event={"ID":"8fd29da9-c781-4e6d-857c-17f84d72c639","Type":"ContainerStarted","Data":"ec5055e85a18c1c7af53bbce65d932276d5af1b014a4baab5a0cb734967c2c57"} Dec 09 15:33:05 crc kubenswrapper[4716]: I1209 15:33:05.886542 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-64jmg" podStartSLOduration=4.227502327 podStartE2EDuration="47.886524186s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="2025-12-09 15:32:21.025800783 +0000 UTC m=+1428.180544771" lastFinishedPulling="2025-12-09 15:33:04.684822642 +0000 UTC m=+1471.839566630" 
observedRunningTime="2025-12-09 15:33:05.883089848 +0000 UTC m=+1473.037833836" watchObservedRunningTime="2025-12-09 15:33:05.886524186 +0000 UTC m=+1473.041268174" Dec 09 15:33:07 crc kubenswrapper[4716]: I1209 15:33:07.883307 4716 generic.go:334] "Generic (PLEG): container finished" podID="e1fa7713-f67e-45a2-81c0-73a56280f744" containerID="c2fa73a1329bbedc77ddddf004e0c6c1f8f493e99de0f806acb9cf4f307e172c" exitCode=0 Dec 09 15:33:07 crc kubenswrapper[4716]: I1209 15:33:07.883386 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-xr25h" event={"ID":"e1fa7713-f67e-45a2-81c0-73a56280f744","Type":"ContainerDied","Data":"c2fa73a1329bbedc77ddddf004e0c6c1f8f493e99de0f806acb9cf4f307e172c"} Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.613600 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.648037 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-config-data\") pod \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.648085 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqfx9\" (UniqueName: \"kubernetes.io/projected/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-kube-api-access-vqfx9\") pod \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.648134 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-fernet-keys\") pod \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.648247 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-credential-keys\") pod \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.648355 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-combined-ca-bundle\") pod \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.648403 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-scripts\") pod \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\" (UID: \"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738\") " Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.662250 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-kube-api-access-vqfx9" (OuterVolumeSpecName: "kube-api-access-vqfx9") pod "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" (UID: "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738"). InnerVolumeSpecName "kube-api-access-vqfx9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.663066 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-scripts" (OuterVolumeSpecName: "scripts") pod "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" (UID: "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.671043 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" (UID: "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.671179 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" (UID: "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.733731 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-config-data" (OuterVolumeSpecName: "config-data") pod "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" (UID: "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.734335 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" (UID: "ff2490f6-9bf3-41a3-a2a5-48e1e7d26738"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.751931 4716 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.751971 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.751985 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.751994 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.752008 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqfx9\" (UniqueName: \"kubernetes.io/projected/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-kube-api-access-vqfx9\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.752018 4716 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.903606 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerStarted","Data":"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd"} Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.907184 4716 generic.go:334] "Generic (PLEG): container finished" podID="8fd29da9-c781-4e6d-857c-17f84d72c639" containerID="ec5055e85a18c1c7af53bbce65d932276d5af1b014a4baab5a0cb734967c2c57" exitCode=0 Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.908492 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-64jmg" event={"ID":"8fd29da9-c781-4e6d-857c-17f84d72c639","Type":"ContainerDied","Data":"ec5055e85a18c1c7af53bbce65d932276d5af1b014a4baab5a0cb734967c2c57"} Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.916601 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7rkmg" Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.916708 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7rkmg" event={"ID":"ff2490f6-9bf3-41a3-a2a5-48e1e7d26738","Type":"ContainerDied","Data":"f0339704c1566cf5fe3e24323d3aae4dc64cee8fb99309372d066265b5ded410"} Dec 09 15:33:08 crc kubenswrapper[4716]: I1209 15:33:08.916743 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0339704c1566cf5fe3e24323d3aae4dc64cee8fb99309372d066265b5ded410" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.244968 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-xr25h" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.274197 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.348475 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cb68t"] Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.353753 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="dnsmasq-dns" containerID="cri-o://eefa6dbc722b250a9dcafef33492b3b9d14adef185af81ecbd15a3553c269d65" gracePeriod=10 Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.376353 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-combined-ca-bundle\") pod \"e1fa7713-f67e-45a2-81c0-73a56280f744\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.376601 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qchn\" (UniqueName: \"kubernetes.io/projected/e1fa7713-f67e-45a2-81c0-73a56280f744-kube-api-access-2qchn\") pod \"e1fa7713-f67e-45a2-81c0-73a56280f744\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.376677 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-config-data\") pod \"e1fa7713-f67e-45a2-81c0-73a56280f744\" (UID: \"e1fa7713-f67e-45a2-81c0-73a56280f744\") " Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.392844 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1fa7713-f67e-45a2-81c0-73a56280f744-kube-api-access-2qchn" (OuterVolumeSpecName: "kube-api-access-2qchn") pod "e1fa7713-f67e-45a2-81c0-73a56280f744" (UID: "e1fa7713-f67e-45a2-81c0-73a56280f744"). InnerVolumeSpecName "kube-api-access-2qchn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.451858 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e1fa7713-f67e-45a2-81c0-73a56280f744" (UID: "e1fa7713-f67e-45a2-81c0-73a56280f744"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.487940 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.487992 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qchn\" (UniqueName: \"kubernetes.io/projected/e1fa7713-f67e-45a2-81c0-73a56280f744-kube-api-access-2qchn\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.494577 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-config-data" (OuterVolumeSpecName: "config-data") pod "e1fa7713-f67e-45a2-81c0-73a56280f744" (UID: "e1fa7713-f67e-45a2-81c0-73a56280f744"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.592355 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1fa7713-f67e-45a2-81c0-73a56280f744-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.675375 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.181:5353: connect: connection refused" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.755333 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-56fcdcb665-5z7hm"] Dec 09 15:33:09 crc kubenswrapper[4716]: E1209 15:33:09.755901 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" containerName="keystone-bootstrap" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.755914 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" containerName="keystone-bootstrap" Dec 09 15:33:09 crc kubenswrapper[4716]: E1209 15:33:09.755940 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1fa7713-f67e-45a2-81c0-73a56280f744" containerName="heat-db-sync" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.755955 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1fa7713-f67e-45a2-81c0-73a56280f744" containerName="heat-db-sync" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.756174 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" containerName="keystone-bootstrap" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.756189 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1fa7713-f67e-45a2-81c0-73a56280f744" containerName="heat-db-sync" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.764077 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.768492 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.768534 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.768609 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.768717 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.768766 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.771212 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kzrzm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.793567 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-56fcdcb665-5z7hm"] Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.897541 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-fernet-keys\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.897617 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-credential-keys\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.897694 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-config-data\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.897920 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-combined-ca-bundle\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.898072 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-internal-tls-certs\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.898112 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-scripts\") pod \"keystone-56fcdcb665-5z7hm\" (UID: 
\"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.898163 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-public-tls-certs\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.898332 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k92d\" (UniqueName: \"kubernetes.io/projected/21c0272c-4b2c-41f2-8777-06d494046fb8-kube-api-access-6k92d\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.967213 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-xr25h" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.967208 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-xr25h" event={"ID":"e1fa7713-f67e-45a2-81c0-73a56280f744","Type":"ContainerDied","Data":"dfac4872f3922d5a0b16ce68f61b7fc2ada3865af3e4d09569530f6d443746fc"} Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.967378 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dfac4872f3922d5a0b16ce68f61b7fc2ada3865af3e4d09569530f6d443746fc" Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.976689 4716 generic.go:334] "Generic (PLEG): container finished" podID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerID="eefa6dbc722b250a9dcafef33492b3b9d14adef185af81ecbd15a3553c269d65" exitCode=0 Dec 09 15:33:09 crc kubenswrapper[4716]: I1209 15:33:09.976920 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" event={"ID":"e9b49f6e-b7fe-4dd8-81d2-738fd521c166","Type":"ContainerDied","Data":"eefa6dbc722b250a9dcafef33492b3b9d14adef185af81ecbd15a3553c269d65"} Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.000819 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-fernet-keys\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.000866 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-credential-keys\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.000948 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-config-data\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.000988 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-combined-ca-bundle\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.001019 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-internal-tls-certs\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.001039 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-scripts\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.001064 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-public-tls-certs\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.001116 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k92d\" (UniqueName: \"kubernetes.io/projected/21c0272c-4b2c-41f2-8777-06d494046fb8-kube-api-access-6k92d\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.008312 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-combined-ca-bundle\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.008558 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-scripts\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.009903 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-credential-keys\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.010785 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-config-data\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.014599 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-internal-tls-certs\") pod \"keystone-56fcdcb665-5z7hm\" (UID: 
\"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.015752 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-fernet-keys\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.021612 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/21c0272c-4b2c-41f2-8777-06d494046fb8-public-tls-certs\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.025188 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k92d\" (UniqueName: \"kubernetes.io/projected/21c0272c-4b2c-41f2-8777-06d494046fb8-kube-api-access-6k92d\") pod \"keystone-56fcdcb665-5z7hm\" (UID: \"21c0272c-4b2c-41f2-8777-06d494046fb8\") " pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.094326 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.123345 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.204494 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-sb\") pod \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.204594 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwjq4\" (UniqueName: \"kubernetes.io/projected/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-kube-api-access-mwjq4\") pod \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.204650 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-nb\") pod \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.204825 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-config\") pod \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.204885 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-svc\") pod \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.204941 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-swift-storage-0\") pod \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\" (UID: \"e9b49f6e-b7fe-4dd8-81d2-738fd521c166\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.216946 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-kube-api-access-mwjq4" (OuterVolumeSpecName: "kube-api-access-mwjq4") pod "e9b49f6e-b7fe-4dd8-81d2-738fd521c166" (UID: "e9b49f6e-b7fe-4dd8-81d2-738fd521c166"). InnerVolumeSpecName "kube-api-access-mwjq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.307482 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwjq4\" (UniqueName: \"kubernetes.io/projected/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-kube-api-access-mwjq4\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.341639 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e9b49f6e-b7fe-4dd8-81d2-738fd521c166" (UID: "e9b49f6e-b7fe-4dd8-81d2-738fd521c166"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.347964 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e9b49f6e-b7fe-4dd8-81d2-738fd521c166" (UID: "e9b49f6e-b7fe-4dd8-81d2-738fd521c166"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.411546 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.411591 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.431921 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e9b49f6e-b7fe-4dd8-81d2-738fd521c166" (UID: "e9b49f6e-b7fe-4dd8-81d2-738fd521c166"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.442199 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-config" (OuterVolumeSpecName: "config") pod "e9b49f6e-b7fe-4dd8-81d2-738fd521c166" (UID: "e9b49f6e-b7fe-4dd8-81d2-738fd521c166"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.445209 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e9b49f6e-b7fe-4dd8-81d2-738fd521c166" (UID: "e9b49f6e-b7fe-4dd8-81d2-738fd521c166"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.511963 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-64jmg" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.514029 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.514056 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.514068 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9b49f6e-b7fe-4dd8-81d2-738fd521c166-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.615481 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-combined-ca-bundle\") pod \"8fd29da9-c781-4e6d-857c-17f84d72c639\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.615865 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-db-sync-config-data\") pod \"8fd29da9-c781-4e6d-857c-17f84d72c639\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.615893 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jshx4\" (UniqueName: \"kubernetes.io/projected/8fd29da9-c781-4e6d-857c-17f84d72c639-kube-api-access-jshx4\") pod \"8fd29da9-c781-4e6d-857c-17f84d72c639\" (UID: \"8fd29da9-c781-4e6d-857c-17f84d72c639\") " Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.629797 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8fd29da9-c781-4e6d-857c-17f84d72c639" (UID: "8fd29da9-c781-4e6d-857c-17f84d72c639"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.669391 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fd29da9-c781-4e6d-857c-17f84d72c639-kube-api-access-jshx4" (OuterVolumeSpecName: "kube-api-access-jshx4") pod "8fd29da9-c781-4e6d-857c-17f84d72c639" (UID: "8fd29da9-c781-4e6d-857c-17f84d72c639"). InnerVolumeSpecName "kube-api-access-jshx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.672747 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8fd29da9-c781-4e6d-857c-17f84d72c639" (UID: "8fd29da9-c781-4e6d-857c-17f84d72c639"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.718090 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.718128 4716 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8fd29da9-c781-4e6d-857c-17f84d72c639-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.718138 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jshx4\" (UniqueName: \"kubernetes.io/projected/8fd29da9-c781-4e6d-857c-17f84d72c639-kube-api-access-jshx4\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:10 crc kubenswrapper[4716]: I1209 15:33:10.761204 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-56fcdcb665-5z7hm"] Dec 09 15:33:10 crc kubenswrapper[4716]: W1209 15:33:10.765912 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21c0272c_4b2c_41f2_8777_06d494046fb8.slice/crio-5693e87e4d525c97a1f75e544d203ef1dd46420457fdf6cb0c534714efce7330 WatchSource:0}: Error finding container 5693e87e4d525c97a1f75e544d203ef1dd46420457fdf6cb0c534714efce7330: Status 404 returned error can't find the container with id 5693e87e4d525c97a1f75e544d203ef1dd46420457fdf6cb0c534714efce7330 Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.040596 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-64jmg" event={"ID":"8fd29da9-c781-4e6d-857c-17f84d72c639","Type":"ContainerDied","Data":"eacea93afa4677e3e8be7ef754de20479097de957443f661a6813d08fba41339"} Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.041062 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eacea93afa4677e3e8be7ef754de20479097de957443f661a6813d08fba41339" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.041173 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-64jmg" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.050007 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-56fcdcb665-5z7hm" event={"ID":"21c0272c-4b2c-41f2-8777-06d494046fb8","Type":"ContainerStarted","Data":"5693e87e4d525c97a1f75e544d203ef1dd46420457fdf6cb0c534714efce7330"} Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.062561 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" event={"ID":"e9b49f6e-b7fe-4dd8-81d2-738fd521c166","Type":"ContainerDied","Data":"6d35ede9ba59694d1927d7b7e8fd8892a84ae7e0eb73ecf4283f98726f8ae4ec"} Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.062640 4716 scope.go:117] "RemoveContainer" containerID="eefa6dbc722b250a9dcafef33492b3b9d14adef185af81ecbd15a3553c269d65" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.062834 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-cb68t" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.155203 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5456946d99-njfkq"] Dec 09 15:33:11 crc kubenswrapper[4716]: E1209 15:33:11.155789 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fd29da9-c781-4e6d-857c-17f84d72c639" containerName="barbican-db-sync" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.155808 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fd29da9-c781-4e6d-857c-17f84d72c639" containerName="barbican-db-sync" Dec 09 15:33:11 crc kubenswrapper[4716]: E1209 15:33:11.155825 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="init" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.155832 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="init" Dec 09 15:33:11 crc kubenswrapper[4716]: E1209 15:33:11.155858 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="dnsmasq-dns" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.155864 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="dnsmasq-dns" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.156123 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fd29da9-c781-4e6d-857c-17f84d72c639" containerName="barbican-db-sync" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.156158 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" containerName="dnsmasq-dns" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.157765 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.162094 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gnxqd" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.162905 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.164410 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.199042 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-79554b67bb-ppw4d"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.202595 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.212811 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.238282 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a338543-0982-4a3c-ac49-49f25a321870-logs\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.238387 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-config-data-custom\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.238424 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-combined-ca-bundle\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.238516 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-config-data\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.238675 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sv2c\" (UniqueName: \"kubernetes.io/projected/1a338543-0982-4a3c-ac49-49f25a321870-kube-api-access-4sv2c\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.265935 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5456946d99-njfkq"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.306769 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cb68t"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.336633 4716 scope.go:117] "RemoveContainer" containerID="96a0697c3bb7cbbe225ae1c8c1188987d1266dc9fd5ff51bd6d38e04e08586a8" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.375490 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-config-data\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.375613 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-config-data\") pod 
\"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.375884 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sv2c\" (UniqueName: \"kubernetes.io/projected/1a338543-0982-4a3c-ac49-49f25a321870-kube-api-access-4sv2c\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.375924 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-config-data-custom\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.391924 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a338543-0982-4a3c-ac49-49f25a321870-logs\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.391998 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnfd8\" (UniqueName: \"kubernetes.io/projected/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-kube-api-access-mnfd8\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.392140 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-combined-ca-bundle\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.392183 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-config-data-custom\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.392232 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-combined-ca-bundle\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.392325 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-logs\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.396782 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1a338543-0982-4a3c-ac49-49f25a321870-logs\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.414247 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-config-data\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.415404 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-config-data-custom\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.427451 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a338543-0982-4a3c-ac49-49f25a321870-combined-ca-bundle\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.472458 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sv2c\" (UniqueName: \"kubernetes.io/projected/1a338543-0982-4a3c-ac49-49f25a321870-kube-api-access-4sv2c\") pod \"barbican-worker-5456946d99-njfkq\" (UID: \"1a338543-0982-4a3c-ac49-49f25a321870\") " pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.475858 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-79554b67bb-ppw4d"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.500188 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnfd8\" (UniqueName: \"kubernetes.io/projected/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-kube-api-access-mnfd8\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.504832 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-combined-ca-bundle\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.505031 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-logs\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.505092 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-config-data\") 
pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.506908 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-logs\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.510781 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-cb68t"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.511852 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5456946d99-njfkq" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.516228 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-combined-ca-bundle\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.520661 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-config-data-custom\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.524225 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnfd8\" (UniqueName: \"kubernetes.io/projected/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-kube-api-access-mnfd8\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.531911 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-config-data\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.534822 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/af26f699-cc84-4ddf-a63a-450b6bb5cfa2-config-data-custom\") pod \"barbican-keystone-listener-79554b67bb-ppw4d\" (UID: \"af26f699-cc84-4ddf-a63a-450b6bb5cfa2\") " pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.572076 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.692976 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-vpmgn"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.695356 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.763061 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-vpmgn"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.784521 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5464d7ccfb-6g4nl"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.799113 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.803852 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.813799 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5464d7ccfb-6g4nl"] Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.842372 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.842704 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58k7z\" (UniqueName: \"kubernetes.io/projected/3dc5901d-b785-4857-928c-9d3147d4f412-kube-api-access-58k7z\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.842757 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-config\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.842785 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.842830 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.842915 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-svc\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.946894 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcbpv\" 
(UniqueName: \"kubernetes.io/projected/fb352458-dad5-4f43-898f-561101aec09f-kube-api-access-hcbpv\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947264 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb352458-dad5-4f43-898f-561101aec09f-logs\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947332 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947358 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947445 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-combined-ca-bundle\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947487 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58k7z\" (UniqueName: \"kubernetes.io/projected/3dc5901d-b785-4857-928c-9d3147d4f412-kube-api-access-58k7z\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947528 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-config\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947546 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947566 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data-custom\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947589 4716 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.947641 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-svc\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.948493 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-svc\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.949828 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.950746 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-config\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.950791 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.953576 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:11 crc kubenswrapper[4716]: I1209 15:33:11.992441 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58k7z\" (UniqueName: \"kubernetes.io/projected/3dc5901d-b785-4857-928c-9d3147d4f412-kube-api-access-58k7z\") pod \"dnsmasq-dns-85ff748b95-vpmgn\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.050225 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.050353 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-combined-ca-bundle\") 
pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.050407 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data-custom\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.050459 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcbpv\" (UniqueName: \"kubernetes.io/projected/fb352458-dad5-4f43-898f-561101aec09f-kube-api-access-hcbpv\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.050510 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb352458-dad5-4f43-898f-561101aec09f-logs\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.051977 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb352458-dad5-4f43-898f-561101aec09f-logs\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.058690 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.059311 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data-custom\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.059420 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-combined-ca-bundle\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.073209 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcbpv\" (UniqueName: \"kubernetes.io/projected/fb352458-dad5-4f43-898f-561101aec09f-kube-api-access-hcbpv\") pod \"barbican-api-5464d7ccfb-6g4nl\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.104764 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.124499 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-56fcdcb665-5z7hm" event={"ID":"21c0272c-4b2c-41f2-8777-06d494046fb8","Type":"ContainerStarted","Data":"72a6f44b6860f3610911a56d117f18ade0b388ee24f63bd9f47e21f43139c6e2"} Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.124832 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.157634 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.194491 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-56fcdcb665-5z7hm" podStartSLOduration=3.194464519 podStartE2EDuration="3.194464519s" podCreationTimestamp="2025-12-09 15:33:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:12.154552179 +0000 UTC m=+1479.309296177" watchObservedRunningTime="2025-12-09 15:33:12.194464519 +0000 UTC m=+1479.349208507" Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.518787 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-79554b67bb-ppw4d"] Dec 09 15:33:12 crc kubenswrapper[4716]: W1209 15:33:12.523737 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf26f699_cc84_4ddf_a63a_450b6bb5cfa2.slice/crio-79022e1386b46bbcb297a34c0d577a230043887480a36df51a7682740c253a9f WatchSource:0}: Error finding container 79022e1386b46bbcb297a34c0d577a230043887480a36df51a7682740c253a9f: Status 404 returned error can't find the container with id 79022e1386b46bbcb297a34c0d577a230043887480a36df51a7682740c253a9f Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.573184 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5456946d99-njfkq"] Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.945969 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5464d7ccfb-6g4nl"] Dec 09 15:33:12 crc kubenswrapper[4716]: I1209 15:33:12.983549 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-vpmgn"] Dec 09 15:33:13 crc kubenswrapper[4716]: I1209 15:33:13.161942 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5464d7ccfb-6g4nl" event={"ID":"fb352458-dad5-4f43-898f-561101aec09f","Type":"ContainerStarted","Data":"e7bb940f420160c5127a1f00d059b713c9af8a4012b89b8571fd3af9896ce4e0"} Dec 09 15:33:13 crc kubenswrapper[4716]: I1209 15:33:13.173449 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5456946d99-njfkq" event={"ID":"1a338543-0982-4a3c-ac49-49f25a321870","Type":"ContainerStarted","Data":"6039638ef0022721b0cbe56abce4e0014a52c317b3e26e233065e99e375ca727"} Dec 09 15:33:13 crc kubenswrapper[4716]: I1209 15:33:13.207138 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" event={"ID":"3dc5901d-b785-4857-928c-9d3147d4f412","Type":"ContainerStarted","Data":"057b92c6ba02e9dd482616f7b1b0934d1fde4d8f404d5847e643d5543d0d8326"} Dec 09 15:33:13 crc kubenswrapper[4716]: I1209 15:33:13.306318 4716 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9b49f6e-b7fe-4dd8-81d2-738fd521c166" path="/var/lib/kubelet/pods/e9b49f6e-b7fe-4dd8-81d2-738fd521c166/volumes" Dec 09 15:33:13 crc kubenswrapper[4716]: I1209 15:33:13.311372 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" event={"ID":"af26f699-cc84-4ddf-a63a-450b6bb5cfa2","Type":"ContainerStarted","Data":"79022e1386b46bbcb297a34c0d577a230043887480a36df51a7682740c253a9f"} Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.254106 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5464d7ccfb-6g4nl" event={"ID":"fb352458-dad5-4f43-898f-561101aec09f","Type":"ContainerStarted","Data":"842cb7525b09e1ede5bad017c8fc6a540e2a04c8ca66311592ddb0a7c05dfaa9"} Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.254763 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5464d7ccfb-6g4nl" event={"ID":"fb352458-dad5-4f43-898f-561101aec09f","Type":"ContainerStarted","Data":"71e4bc3fcad27e9034f797c0f546857b8fe5d6f75a9cf4cd4c6b1df9b5609bce"} Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.255266 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.255306 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.264965 4716 generic.go:334] "Generic (PLEG): container finished" podID="3dc5901d-b785-4857-928c-9d3147d4f412" containerID="22442207d911f56244b5151f12194e43208847e892395f9486fb81ac851ade4d" exitCode=0 Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.265059 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" event={"ID":"3dc5901d-b785-4857-928c-9d3147d4f412","Type":"ContainerDied","Data":"22442207d911f56244b5151f12194e43208847e892395f9486fb81ac851ade4d"} Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.305785 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqhrf" event={"ID":"c31e7c21-64fd-4bb2-b165-df1743489363","Type":"ContainerStarted","Data":"3618ecfbe5474f07f5131d4d0ad84bde532e88e50f0fe62e5e562bcbd33c4218"} Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.307091 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5464d7ccfb-6g4nl" podStartSLOduration=3.307077627 podStartE2EDuration="3.307077627s" podCreationTimestamp="2025-12-09 15:33:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:14.294604608 +0000 UTC m=+1481.449348596" watchObservedRunningTime="2025-12-09 15:33:14.307077627 +0000 UTC m=+1481.461821615" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.388599 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-sqhrf" podStartSLOduration=6.369292015 podStartE2EDuration="56.388574836s" podCreationTimestamp="2025-12-09 15:32:18 +0000 UTC" firstStartedPulling="2025-12-09 15:32:20.750048985 +0000 UTC m=+1427.904792973" lastFinishedPulling="2025-12-09 15:33:10.769331806 +0000 UTC m=+1477.924075794" observedRunningTime="2025-12-09 15:33:14.356054639 +0000 UTC m=+1481.510798637" watchObservedRunningTime="2025-12-09 15:33:14.388574836 +0000 UTC 
m=+1481.543318824" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.857607 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-64c8798578-khfkb"] Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.861151 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.865075 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.865213 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.882713 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-64c8798578-khfkb"] Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.951918 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-config-data\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.952043 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-internal-tls-certs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.952072 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-combined-ca-bundle\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.952090 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-public-tls-certs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.952142 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-config-data-custom\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.952198 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d52e1a64-3a20-424c-8e65-bcb50625259a-logs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:14 crc kubenswrapper[4716]: I1209 15:33:14.952228 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srhmm\" (UniqueName: 
\"kubernetes.io/projected/d52e1a64-3a20-424c-8e65-bcb50625259a-kube-api-access-srhmm\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.054768 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d52e1a64-3a20-424c-8e65-bcb50625259a-logs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.054848 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srhmm\" (UniqueName: \"kubernetes.io/projected/d52e1a64-3a20-424c-8e65-bcb50625259a-kube-api-access-srhmm\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.054924 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-config-data\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.055067 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-internal-tls-certs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.055092 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-combined-ca-bundle\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.055122 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-public-tls-certs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.055196 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-config-data-custom\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.056532 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d52e1a64-3a20-424c-8e65-bcb50625259a-logs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.071277 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-config-data\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.081290 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-public-tls-certs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.083498 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-internal-tls-certs\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.102558 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srhmm\" (UniqueName: \"kubernetes.io/projected/d52e1a64-3a20-424c-8e65-bcb50625259a-kube-api-access-srhmm\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.103458 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-combined-ca-bundle\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.104271 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d52e1a64-3a20-424c-8e65-bcb50625259a-config-data-custom\") pod \"barbican-api-64c8798578-khfkb\" (UID: \"d52e1a64-3a20-424c-8e65-bcb50625259a\") " pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.227464 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.329554 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" event={"ID":"3dc5901d-b785-4857-928c-9d3147d4f412","Type":"ContainerStarted","Data":"ff5801a0ee109df3d8e18d69f975de05311276d70c002727c081a6e573653c8b"} Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.354363 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" podStartSLOduration=4.35433932 podStartE2EDuration="4.35433932s" podCreationTimestamp="2025-12-09 15:33:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:15.349444989 +0000 UTC m=+1482.504188977" watchObservedRunningTime="2025-12-09 15:33:15.35433932 +0000 UTC m=+1482.509083308" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.589461 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.760768 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.773198 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:15 crc kubenswrapper[4716]: I1209 15:33:15.776831 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 15:33:16 crc kubenswrapper[4716]: I1209 15:33:16.354512 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:16 crc kubenswrapper[4716]: I1209 15:33:16.849903 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-64c8798578-khfkb"] Dec 09 15:33:16 crc kubenswrapper[4716]: W1209 15:33:16.852235 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd52e1a64_3a20_424c_8e65_bcb50625259a.slice/crio-7f04757607d716b33d02a2a78d80bf6248e857081d09a70b26a282b771953fc0 WatchSource:0}: Error finding container 7f04757607d716b33d02a2a78d80bf6248e857081d09a70b26a282b771953fc0: Status 404 returned error can't find the container with id 7f04757607d716b33d02a2a78d80bf6248e857081d09a70b26a282b771953fc0 Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.367397 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-64c8798578-khfkb" event={"ID":"d52e1a64-3a20-424c-8e65-bcb50625259a","Type":"ContainerStarted","Data":"ff5900b98c0aa4fb17420ecb6e3fad1cd110ac09b65b1f6162b3901e5afb55fa"} Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.368747 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-64c8798578-khfkb" event={"ID":"d52e1a64-3a20-424c-8e65-bcb50625259a","Type":"ContainerStarted","Data":"7f04757607d716b33d02a2a78d80bf6248e857081d09a70b26a282b771953fc0"} Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.369113 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" event={"ID":"af26f699-cc84-4ddf-a63a-450b6bb5cfa2","Type":"ContainerStarted","Data":"4d818deb80b1c2457ec8c51ff045fb82c1fe306df0dea40e1a1dd0ff0c7583bc"} Dec 09 15:33:17 crc 
kubenswrapper[4716]: I1209 15:33:17.370586 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5456946d99-njfkq" event={"ID":"1a338543-0982-4a3c-ac49-49f25a321870","Type":"ContainerStarted","Data":"ca9ee7325cf3de8c34acc6d08ad71eb42f987faa02e374248c178941940baf1c"} Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.922673 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.922734 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.922776 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.923744 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"86592a964b516fe613abb12daefa0047ee74a39b779e6980fea7c1589b5faf81"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:33:17 crc kubenswrapper[4716]: I1209 15:33:17.923793 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://86592a964b516fe613abb12daefa0047ee74a39b779e6980fea7c1589b5faf81" gracePeriod=600 Dec 09 15:33:18 crc kubenswrapper[4716]: I1209 15:33:18.387462 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="86592a964b516fe613abb12daefa0047ee74a39b779e6980fea7c1589b5faf81" exitCode=0 Dec 09 15:33:18 crc kubenswrapper[4716]: I1209 15:33:18.387506 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"86592a964b516fe613abb12daefa0047ee74a39b779e6980fea7c1589b5faf81"} Dec 09 15:33:18 crc kubenswrapper[4716]: I1209 15:33:18.387850 4716 scope.go:117] "RemoveContainer" containerID="51f7652ef00c2c1bb2d4496bda7b4d5fed9d9a21f2c7f785c34588994268c9b1" Dec 09 15:33:19 crc kubenswrapper[4716]: I1209 15:33:19.409070 4716 generic.go:334] "Generic (PLEG): container finished" podID="c31e7c21-64fd-4bb2-b165-df1743489363" containerID="3618ecfbe5474f07f5131d4d0ad84bde532e88e50f0fe62e5e562bcbd33c4218" exitCode=0 Dec 09 15:33:19 crc kubenswrapper[4716]: I1209 15:33:19.409364 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqhrf" event={"ID":"c31e7c21-64fd-4bb2-b165-df1743489363","Type":"ContainerDied","Data":"3618ecfbe5474f07f5131d4d0ad84bde532e88e50f0fe62e5e562bcbd33c4218"} Dec 09 15:33:22 crc kubenswrapper[4716]: I1209 15:33:22.107557 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:22 crc kubenswrapper[4716]: I1209 15:33:22.187581 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-dj8vd"] Dec 09 15:33:22 crc kubenswrapper[4716]: I1209 15:33:22.188394 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerName="dnsmasq-dns" containerID="cri-o://e6a2c0a56133fcd219b5b65d84f7a3281373d6bc8413978af0a990c3d108b608" gracePeriod=10 Dec 09 15:33:22 crc kubenswrapper[4716]: I1209 15:33:22.458507 4716 generic.go:334] "Generic (PLEG): container finished" podID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerID="e6a2c0a56133fcd219b5b65d84f7a3281373d6bc8413978af0a990c3d108b608" exitCode=0 Dec 09 15:33:22 crc kubenswrapper[4716]: I1209 15:33:22.458846 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" event={"ID":"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220","Type":"ContainerDied","Data":"e6a2c0a56133fcd219b5b65d84f7a3281373d6bc8413978af0a990c3d108b608"} Dec 09 15:33:23 crc kubenswrapper[4716]: I1209 15:33:23.472402 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" event={"ID":"af26f699-cc84-4ddf-a63a-450b6bb5cfa2","Type":"ContainerStarted","Data":"bc4a4f46300be62727249bd2d976341ad480a90e298465d2b3ec88fb68b86560"} Dec 09 15:33:23 crc kubenswrapper[4716]: I1209 15:33:23.492520 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-79554b67bb-ppw4d" podStartSLOduration=8.654518787 podStartE2EDuration="12.492492042s" podCreationTimestamp="2025-12-09 15:33:11 +0000 UTC" firstStartedPulling="2025-12-09 15:33:12.527163438 +0000 UTC m=+1479.681907426" lastFinishedPulling="2025-12-09 15:33:16.365136693 +0000 UTC m=+1483.519880681" observedRunningTime="2025-12-09 15:33:23.49137426 +0000 UTC m=+1490.646118248" watchObservedRunningTime="2025-12-09 15:33:23.492492042 +0000 UTC m=+1490.647236030" Dec 09 15:33:23 crc kubenswrapper[4716]: I1209 15:33:23.919934 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:23 crc kubenswrapper[4716]: I1209 15:33:23.962570 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.014997 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-db-sync-config-data\") pod \"c31e7c21-64fd-4bb2-b165-df1743489363\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.015102 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-combined-ca-bundle\") pod \"c31e7c21-64fd-4bb2-b165-df1743489363\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.015223 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c31e7c21-64fd-4bb2-b165-df1743489363-etc-machine-id\") pod \"c31e7c21-64fd-4bb2-b165-df1743489363\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.015258 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-scripts\") pod \"c31e7c21-64fd-4bb2-b165-df1743489363\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.015282 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5644v\" (UniqueName: \"kubernetes.io/projected/c31e7c21-64fd-4bb2-b165-df1743489363-kube-api-access-5644v\") pod \"c31e7c21-64fd-4bb2-b165-df1743489363\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.015360 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-config-data\") pod \"c31e7c21-64fd-4bb2-b165-df1743489363\" (UID: \"c31e7c21-64fd-4bb2-b165-df1743489363\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.024570 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c31e7c21-64fd-4bb2-b165-df1743489363-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c31e7c21-64fd-4bb2-b165-df1743489363" (UID: "c31e7c21-64fd-4bb2-b165-df1743489363"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.025927 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c31e7c21-64fd-4bb2-b165-df1743489363" (UID: "c31e7c21-64fd-4bb2-b165-df1743489363"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.028511 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-scripts" (OuterVolumeSpecName: "scripts") pod "c31e7c21-64fd-4bb2-b165-df1743489363" (UID: "c31e7c21-64fd-4bb2-b165-df1743489363"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.029783 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c31e7c21-64fd-4bb2-b165-df1743489363-kube-api-access-5644v" (OuterVolumeSpecName: "kube-api-access-5644v") pod "c31e7c21-64fd-4bb2-b165-df1743489363" (UID: "c31e7c21-64fd-4bb2-b165-df1743489363"). InnerVolumeSpecName "kube-api-access-5644v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.070473 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.119447 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-config\") pod \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.119954 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-swift-storage-0\") pod \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120015 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-nb\") pod \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120084 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-svc\") pod \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120139 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zzw2\" (UniqueName: \"kubernetes.io/projected/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-kube-api-access-6zzw2\") pod \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120262 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-sb\") pod \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\" (UID: \"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220\") " Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120920 4716 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c31e7c21-64fd-4bb2-b165-df1743489363-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120946 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120960 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5644v\" (UniqueName: 
\"kubernetes.io/projected/c31e7c21-64fd-4bb2-b165-df1743489363-kube-api-access-5644v\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.120976 4716 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.135239 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-kube-api-access-6zzw2" (OuterVolumeSpecName: "kube-api-access-6zzw2") pod "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" (UID: "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220"). InnerVolumeSpecName "kube-api-access-6zzw2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.223893 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zzw2\" (UniqueName: \"kubernetes.io/projected/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-kube-api-access-6zzw2\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.231264 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.481546 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c31e7c21-64fd-4bb2-b165-df1743489363" (UID: "c31e7c21-64fd-4bb2-b165-df1743489363"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.540469 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.545012 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" event={"ID":"8e9d87d9-4356-4ed5-b3ac-8e2f1f851220","Type":"ContainerDied","Data":"5b7611748b987b4600381e41d7be69122a369823c73ce750a84116e6afd22cc5"} Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.545089 4716 scope.go:117] "RemoveContainer" containerID="e6a2c0a56133fcd219b5b65d84f7a3281373d6bc8413978af0a990c3d108b608" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.545259 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-dj8vd" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.552536 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" (UID: "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.570439 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-config-data" (OuterVolumeSpecName: "config-data") pod "c31e7c21-64fd-4bb2-b165-df1743489363" (UID: "c31e7c21-64fd-4bb2-b165-df1743489363"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.593230 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerStarted","Data":"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c"} Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.593415 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-central-agent" containerID="cri-o://343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343" gracePeriod=30 Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.593565 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="proxy-httpd" containerID="cri-o://273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c" gracePeriod=30 Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.593610 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="sg-core" containerID="cri-o://54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd" gracePeriod=30 Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.593668 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-notification-agent" containerID="cri-o://e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f" gracePeriod=30 Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.593800 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.599179 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" (UID: "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.643420 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.643473 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.643487 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c31e7c21-64fd-4bb2-b165-df1743489363-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.647941 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.849711829 podStartE2EDuration="1m5.647919953s" podCreationTimestamp="2025-12-09 15:32:19 +0000 UTC" firstStartedPulling="2025-12-09 15:32:21.027291286 +0000 UTC m=+1428.182035274" lastFinishedPulling="2025-12-09 15:33:23.8254994 +0000 UTC m=+1490.980243398" observedRunningTime="2025-12-09 15:33:24.63359095 +0000 UTC m=+1491.788334938" watchObservedRunningTime="2025-12-09 15:33:24.647919953 +0000 UTC m=+1491.802663941" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.663333 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" (UID: "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.665103 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-config" (OuterVolumeSpecName: "config") pod "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" (UID: "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.665105 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc"} Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.675025 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" (UID: "8e9d87d9-4356-4ed5-b3ac-8e2f1f851220"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.681171 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-sqhrf" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.682966 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqhrf" event={"ID":"c31e7c21-64fd-4bb2-b165-df1743489363","Type":"ContainerDied","Data":"03eb247dbe197e4cc3e62113bc0166fd2a41a93ed41956ae53b8942655dd7567"} Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.683030 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03eb247dbe197e4cc3e62113bc0166fd2a41a93ed41956ae53b8942655dd7567" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.696212 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5456946d99-njfkq" podStartSLOduration=9.898146911 podStartE2EDuration="13.696172444s" podCreationTimestamp="2025-12-09 15:33:11 +0000 UTC" firstStartedPulling="2025-12-09 15:33:12.558944864 +0000 UTC m=+1479.713688852" lastFinishedPulling="2025-12-09 15:33:16.356970397 +0000 UTC m=+1483.511714385" observedRunningTime="2025-12-09 15:33:24.654461322 +0000 UTC m=+1491.809205310" watchObservedRunningTime="2025-12-09 15:33:24.696172444 +0000 UTC m=+1491.850916432" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.745965 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.746006 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.746016 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.812360 4716 scope.go:117] "RemoveContainer" containerID="968a76c56a7d236e6f73e80d519edca7bc7ca4896ac8c37f033ee084876122f9" Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.946021 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-dj8vd"] Dec 09 15:33:24 crc kubenswrapper[4716]: I1209 15:33:24.977466 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-dj8vd"] Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.242304 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" path="/var/lib/kubelet/pods/8e9d87d9-4356-4ed5-b3ac-8e2f1f851220/volumes" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.399697 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-tm6xz"] Dec 09 15:33:25 crc kubenswrapper[4716]: E1209 15:33:25.400280 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerName="init" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.400297 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerName="init" Dec 09 15:33:25 crc kubenswrapper[4716]: E1209 15:33:25.400315 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c31e7c21-64fd-4bb2-b165-df1743489363" containerName="cinder-db-sync" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.400322 4716 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c31e7c21-64fd-4bb2-b165-df1743489363" containerName="cinder-db-sync" Dec 09 15:33:25 crc kubenswrapper[4716]: E1209 15:33:25.400339 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerName="dnsmasq-dns" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.400345 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerName="dnsmasq-dns" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.400576 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e9d87d9-4356-4ed5-b3ac-8e2f1f851220" containerName="dnsmasq-dns" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.400590 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c31e7c21-64fd-4bb2-b165-df1743489363" containerName="cinder-db-sync" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.401997 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.413630 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.415823 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.431415 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-lxgp5" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.431607 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.431741 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.432143 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-tm6xz"] Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.433860 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.455692 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463306 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463378 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463402 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-scripts\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " 
pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463432 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kw89\" (UniqueName: \"kubernetes.io/projected/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-kube-api-access-9kw89\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463447 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-config\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463465 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463480 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463509 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463594 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463612 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g76tp\" (UniqueName: \"kubernetes.io/projected/89424e07-4a8d-47e7-b0f0-4d5161773f94-kube-api-access-g76tp\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463651 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.463672 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: 
\"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.566968 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567320 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g76tp\" (UniqueName: \"kubernetes.io/projected/89424e07-4a8d-47e7-b0f0-4d5161773f94-kube-api-access-g76tp\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567343 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567368 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567428 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567479 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567501 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-scripts\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567538 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kw89\" (UniqueName: \"kubernetes.io/projected/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-kube-api-access-9kw89\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567558 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-config\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc 
kubenswrapper[4716]: I1209 15:33:25.567579 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567595 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567645 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.567758 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.568475 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.569472 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-config\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.573647 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.573715 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.574496 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.590862 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-scripts\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.598495 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.598792 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.602139 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g76tp\" (UniqueName: \"kubernetes.io/projected/89424e07-4a8d-47e7-b0f0-4d5161773f94-kube-api-access-g76tp\") pod \"dnsmasq-dns-5c9776ccc5-tm6xz\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.622310 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kw89\" (UniqueName: \"kubernetes.io/projected/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-kube-api-access-9kw89\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.623253 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.722152 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5456946d99-njfkq" event={"ID":"1a338543-0982-4a3c-ac49-49f25a321870","Type":"ContainerStarted","Data":"cd1dc40fb035c3ea68ab160896432fb01fc462b06db239c014e6edc8c1371046"} Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.746223 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.774004 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.783975 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-64c8798578-khfkb" event={"ID":"d52e1a64-3a20-424c-8e65-bcb50625259a","Type":"ContainerStarted","Data":"3d3df06f90238cf6febe930927c8e361233e27de0aca06ab32895f938c28d274"} Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.784902 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.785248 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.827446 4716 generic.go:334] "Generic (PLEG): container finished" podID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerID="273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c" exitCode=0 Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.827478 4716 generic.go:334] "Generic (PLEG): container finished" podID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerID="54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd" exitCode=2 Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.829733 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerDied","Data":"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c"} Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.829803 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerDied","Data":"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd"} Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.888916 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.902887 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.916893 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.929186 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-64c8798578-khfkb" podStartSLOduration=11.92916074 podStartE2EDuration="11.92916074s" podCreationTimestamp="2025-12-09 15:33:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:25.821441586 +0000 UTC m=+1492.976185624" watchObservedRunningTime="2025-12-09 15:33:25.92916074 +0000 UTC m=+1493.083904728" Dec 09 15:33:25 crc kubenswrapper[4716]: I1209 15:33:25.929310 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046079 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046476 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data-custom\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046526 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046582 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046649 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-scripts\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046675 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzczt\" (UniqueName: \"kubernetes.io/projected/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-kube-api-access-hzczt\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.046699 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-logs\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 
09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148622 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148752 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data-custom\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148803 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148853 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148900 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-scripts\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148926 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzczt\" (UniqueName: \"kubernetes.io/projected/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-kube-api-access-hzczt\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148926 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.148952 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-logs\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.150770 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-logs\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.161339 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data-custom\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 
15:33:26.162363 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.178305 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.186212 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzczt\" (UniqueName: \"kubernetes.io/projected/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-kube-api-access-hzczt\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.209117 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-scripts\") pod \"cinder-api-0\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.245272 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.555834 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.704125 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-tm6xz"] Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.878024 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.885365 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" event={"ID":"89424e07-4a8d-47e7-b0f0-4d5161773f94","Type":"ContainerStarted","Data":"0c3c9bafffe6d7bc606d4a029859b8ae03d27f643bc27b0166a713f80cc62869"} Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.917421 4716 generic.go:334] "Generic (PLEG): container finished" podID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerID="e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f" exitCode=0 Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.917765 4716 generic.go:334] "Generic (PLEG): container finished" podID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerID="343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343" exitCode=0 Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.917854 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerDied","Data":"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f"} Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.917884 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerDied","Data":"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343"} Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.917894 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9c5e102f-3947-4eae-a8de-78bb48d177e9","Type":"ContainerDied","Data":"a2f6a9910ea319b5e8ca4374f75b67992edddddd8d0c480d2ad5d957845c4bc2"} Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.917911 4716 scope.go:117] "RemoveContainer" containerID="273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.918161 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.927280 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"cb788ecd-b3f5-4bbc-a888-4962c8d3a106","Type":"ContainerStarted","Data":"8a8ea922a14cf30b6f763c4fe87af526887ecad83bf45a94f873cc6d9c102f04"} Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.955223 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:33:26 crc kubenswrapper[4716]: I1209 15:33:26.996661 4716 scope.go:117] "RemoveContainer" containerID="54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.076439 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-run-httpd\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.076572 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-scripts\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.076660 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-sg-core-conf-yaml\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.076721 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-log-httpd\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.076792 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-config-data\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.076982 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-combined-ca-bundle\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.077028 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxwcz\" (UniqueName: \"kubernetes.io/projected/9c5e102f-3947-4eae-a8de-78bb48d177e9-kube-api-access-nxwcz\") pod \"9c5e102f-3947-4eae-a8de-78bb48d177e9\" (UID: \"9c5e102f-3947-4eae-a8de-78bb48d177e9\") " Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.079026 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.079267 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.116487 4716 scope.go:117] "RemoveContainer" containerID="e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.138700 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-scripts" (OuterVolumeSpecName: "scripts") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.151189 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c5e102f-3947-4eae-a8de-78bb48d177e9-kube-api-access-nxwcz" (OuterVolumeSpecName: "kube-api-access-nxwcz") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "kube-api-access-nxwcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.178030 4716 scope.go:117] "RemoveContainer" containerID="343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.181138 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.181173 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.181186 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9c5e102f-3947-4eae-a8de-78bb48d177e9-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.181202 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxwcz\" (UniqueName: \"kubernetes.io/projected/9c5e102f-3947-4eae-a8de-78bb48d177e9-kube-api-access-nxwcz\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.243250 4716 scope.go:117] "RemoveContainer" containerID="273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.245744 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c\": container with ID starting with 273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c not found: ID does not exist" containerID="273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.245789 4716 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c"} err="failed to get container status \"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c\": rpc error: code = NotFound desc = could not find container \"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c\": container with ID starting with 273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.245846 4716 scope.go:117] "RemoveContainer" containerID="54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.253417 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd\": container with ID starting with 54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd not found: ID does not exist" containerID="54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.253471 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd"} err="failed to get container status \"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd\": rpc error: code = NotFound desc = could not find container \"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd\": container with ID starting with 54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.253537 4716 scope.go:117] "RemoveContainer" containerID="e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.254035 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f\": container with ID starting with e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f not found: ID does not exist" containerID="e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.254088 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f"} err="failed to get container status \"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f\": rpc error: code = NotFound desc = could not find container \"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f\": container with ID starting with e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.254124 4716 scope.go:117] "RemoveContainer" containerID="343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.254539 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343\": container with ID starting with 343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343 not found: ID does not exist" 
containerID="343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.254566 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343"} err="failed to get container status \"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343\": rpc error: code = NotFound desc = could not find container \"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343\": container with ID starting with 343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343 not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.254594 4716 scope.go:117] "RemoveContainer" containerID="273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.255034 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c"} err="failed to get container status \"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c\": rpc error: code = NotFound desc = could not find container \"273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c\": container with ID starting with 273caa52c06ee4958f9703a292bccbe0d6ffc2e994175b02452b216211b4796c not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.255061 4716 scope.go:117] "RemoveContainer" containerID="54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.255373 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd"} err="failed to get container status \"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd\": rpc error: code = NotFound desc = could not find container \"54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd\": container with ID starting with 54fca364a08d8ad438e7286e581067fd081306d7f4221a1cae4cd2232d7eefdd not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.255406 4716 scope.go:117] "RemoveContainer" containerID="e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.255745 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f"} err="failed to get container status \"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f\": rpc error: code = NotFound desc = could not find container \"e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f\": container with ID starting with e2db032a3fbade4d5b688db4e6e5d90f3b78ab05ee9147a2da305112eb810b4f not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.255792 4716 scope.go:117] "RemoveContainer" containerID="343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.256403 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343"} err="failed to get container status \"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343\": rpc error: code = NotFound desc = could not find 
container \"343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343\": container with ID starting with 343cc0c9f27ffe5c2dca7847180fab77d8b198bf368d5676828cb2a194c57343 not found: ID does not exist" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.322719 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.385478 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.411357 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.487832 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-config-data" (OuterVolumeSpecName: "config-data") pod "9c5e102f-3947-4eae-a8de-78bb48d177e9" (UID: "9c5e102f-3947-4eae-a8de-78bb48d177e9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.488092 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.488108 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c5e102f-3947-4eae-a8de-78bb48d177e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.689184 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.719711 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.735338 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.736018 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-central-agent" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736039 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-central-agent" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.736097 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="proxy-httpd" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736104 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" 
containerName="proxy-httpd" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.736115 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="sg-core" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736121 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="sg-core" Dec 09 15:33:27 crc kubenswrapper[4716]: E1209 15:33:27.736141 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-notification-agent" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736146 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-notification-agent" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736437 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-central-agent" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736467 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="proxy-httpd" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736494 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="sg-core" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.736511 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" containerName="ceilometer-notification-agent" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.761031 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.761183 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.766384 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.766671 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.896764 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-config-data\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.896831 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.896894 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-log-httpd\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.896924 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-scripts\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.896976 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w8g2\" (UniqueName: \"kubernetes.io/projected/04ee94e7-2ee0-4fbb-9145-22effa8ab009-kube-api-access-9w8g2\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.897102 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.897150 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-run-httpd\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.953288 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ef9ed626-a15a-44e8-9f3f-341d1a6967b7","Type":"ContainerStarted","Data":"556862d1efec32c631349562fb72d8444e93c1d3f11e3aad4681d0cb551a64b5"} Dec 09 15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.978717 4716 generic.go:334] "Generic (PLEG): container finished" podID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerID="1f74a13cb12e58636b15e39e154981d688769dcd573ec73f8539709e1c487d2b" exitCode=0 Dec 09 
15:33:27 crc kubenswrapper[4716]: I1209 15:33:27.978872 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" event={"ID":"89424e07-4a8d-47e7-b0f0-4d5161773f94","Type":"ContainerDied","Data":"1f74a13cb12e58636b15e39e154981d688769dcd573ec73f8539709e1c487d2b"} Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.000660 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-config-data\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.000741 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.000806 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-log-httpd\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.000834 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-scripts\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.000876 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w8g2\" (UniqueName: \"kubernetes.io/projected/04ee94e7-2ee0-4fbb-9145-22effa8ab009-kube-api-access-9w8g2\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.000995 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.001049 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-run-httpd\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.001733 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-run-httpd\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.009233 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-log-httpd\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.023154 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.026974 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-scripts\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.027683 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.031932 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-config-data\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.035773 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w8g2\" (UniqueName: \"kubernetes.io/projected/04ee94e7-2ee0-4fbb-9145-22effa8ab009-kube-api-access-9w8g2\") pod \"ceilometer-0\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.104903 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:28 crc kubenswrapper[4716]: I1209 15:33:28.663410 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.004761 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ef9ed626-a15a-44e8-9f3f-341d1a6967b7","Type":"ContainerStarted","Data":"4f2aeda864489523c86585554b9d8998a96efd0cc74ae0b70e9cdef158de2be2"} Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.014939 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" event={"ID":"89424e07-4a8d-47e7-b0f0-4d5161773f94","Type":"ContainerStarted","Data":"2ff5d67d362b7047d8a1a4302e41343f2ffeef6fa5dc33b39b0c6a0385d5cca2"} Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.016931 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.061444 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" podStartSLOduration=4.061421826 podStartE2EDuration="4.061421826s" podCreationTimestamp="2025-12-09 15:33:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:29.047174936 +0000 UTC m=+1496.201918924" watchObservedRunningTime="2025-12-09 15:33:29.061421826 +0000 UTC m=+1496.216165814" Dec 09 15:33:29 crc kubenswrapper[4716]: W1209 15:33:29.063388 4716 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04ee94e7_2ee0_4fbb_9145_22effa8ab009.slice/crio-889e5a96f5b19af7d87b18a5501a387a9b1e986151fa1f46fe526b4095d9a557 WatchSource:0}: Error finding container 889e5a96f5b19af7d87b18a5501a387a9b1e986151fa1f46fe526b4095d9a557: Status 404 returned error can't find the container with id 889e5a96f5b19af7d87b18a5501a387a9b1e986151fa1f46fe526b4095d9a557 Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.100518 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.242548 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c5e102f-3947-4eae-a8de-78bb48d177e9" path="/var/lib/kubelet/pods/9c5e102f-3947-4eae-a8de-78bb48d177e9/volumes" Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.485925 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:33:29 crc kubenswrapper[4716]: I1209 15:33:29.644263 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.028705 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerStarted","Data":"a1d3b3a81b668ef739d227b362886584f776dcca3b811a4c7b5a2c259ec330b2"} Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.029070 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerStarted","Data":"889e5a96f5b19af7d87b18a5501a387a9b1e986151fa1f46fe526b4095d9a557"} Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.033718 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"cb788ecd-b3f5-4bbc-a888-4962c8d3a106","Type":"ContainerStarted","Data":"e8635e9db23f0cde693a5016caf8ca0e35ea31e851332ac8b6276d13cc759bbf"} Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.033761 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"cb788ecd-b3f5-4bbc-a888-4962c8d3a106","Type":"ContainerStarted","Data":"4fc959434f39bc355ffb101e4598cef91cdfdd3fa0ebaa3373e9e212d78d3ad3"} Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.039474 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ef9ed626-a15a-44e8-9f3f-341d1a6967b7","Type":"ContainerStarted","Data":"edebf391edf04647c06a6b9d3f7b27f7477b570f909356bd43f842cb8ddbea37"} Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.039613 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api-log" containerID="cri-o://4f2aeda864489523c86585554b9d8998a96efd0cc74ae0b70e9cdef158de2be2" gracePeriod=30 Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.039672 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api" containerID="cri-o://edebf391edf04647c06a6b9d3f7b27f7477b570f909356bd43f842cb8ddbea37" gracePeriod=30 Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.057632 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" 
podStartSLOduration=3.752331067 podStartE2EDuration="5.057596417s" podCreationTimestamp="2025-12-09 15:33:25 +0000 UTC" firstStartedPulling="2025-12-09 15:33:26.588348288 +0000 UTC m=+1493.743092276" lastFinishedPulling="2025-12-09 15:33:27.893613638 +0000 UTC m=+1495.048357626" observedRunningTime="2025-12-09 15:33:30.053652073 +0000 UTC m=+1497.208396061" watchObservedRunningTime="2025-12-09 15:33:30.057596417 +0000 UTC m=+1497.212340405" Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.089256 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.089232028 podStartE2EDuration="5.089232028s" podCreationTimestamp="2025-12-09 15:33:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:30.086181701 +0000 UTC m=+1497.240925689" watchObservedRunningTime="2025-12-09 15:33:30.089232028 +0000 UTC m=+1497.243976016" Dec 09 15:33:30 crc kubenswrapper[4716]: I1209 15:33:30.775316 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.060436 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerID="4f2aeda864489523c86585554b9d8998a96efd0cc74ae0b70e9cdef158de2be2" exitCode=143 Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.060545 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ef9ed626-a15a-44e8-9f3f-341d1a6967b7","Type":"ContainerDied","Data":"4f2aeda864489523c86585554b9d8998a96efd0cc74ae0b70e9cdef158de2be2"} Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.071968 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerStarted","Data":"7a59b8aa87fb92ce4a8dcc3201948518059b9a27226748d843452d660b84f71d"} Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.245746 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.714924 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-64c8798578-khfkb" Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.801875 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5464d7ccfb-6g4nl"] Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.802476 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5464d7ccfb-6g4nl" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api-log" containerID="cri-o://71e4bc3fcad27e9034f797c0f546857b8fe5d6f75a9cf4cd4c6b1df9b5609bce" gracePeriod=30 Dec 09 15:33:31 crc kubenswrapper[4716]: I1209 15:33:31.803116 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5464d7ccfb-6g4nl" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api" containerID="cri-o://842cb7525b09e1ede5bad017c8fc6a540e2a04c8ca66311592ddb0a7c05dfaa9" gracePeriod=30 Dec 09 15:33:32 crc kubenswrapper[4716]: I1209 15:33:32.103812 4716 generic.go:334] "Generic (PLEG): container finished" podID="fb352458-dad5-4f43-898f-561101aec09f" containerID="71e4bc3fcad27e9034f797c0f546857b8fe5d6f75a9cf4cd4c6b1df9b5609bce" exitCode=143 Dec 09 15:33:32 crc 
kubenswrapper[4716]: I1209 15:33:32.103888 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5464d7ccfb-6g4nl" event={"ID":"fb352458-dad5-4f43-898f-561101aec09f","Type":"ContainerDied","Data":"71e4bc3fcad27e9034f797c0f546857b8fe5d6f75a9cf4cd4c6b1df9b5609bce"} Dec 09 15:33:32 crc kubenswrapper[4716]: I1209 15:33:32.125926 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerStarted","Data":"4ca96384d533b03a05a7480a64c81fe0f0f3605373f37166698ac991432ab754"} Dec 09 15:33:32 crc kubenswrapper[4716]: I1209 15:33:32.376592 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-67d7f5448f-zcfm9" Dec 09 15:33:32 crc kubenswrapper[4716]: I1209 15:33:32.492416 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-f86fdcb78-t5sms"] Dec 09 15:33:32 crc kubenswrapper[4716]: I1209 15:33:32.492957 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-f86fdcb78-t5sms" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-api" containerID="cri-o://24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24" gracePeriod=30 Dec 09 15:33:32 crc kubenswrapper[4716]: I1209 15:33:32.493501 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-f86fdcb78-t5sms" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-httpd" containerID="cri-o://373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8" gracePeriod=30 Dec 09 15:33:33 crc kubenswrapper[4716]: I1209 15:33:33.141795 4716 generic.go:334] "Generic (PLEG): container finished" podID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerID="373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8" exitCode=0 Dec 09 15:33:33 crc kubenswrapper[4716]: I1209 15:33:33.141883 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f86fdcb78-t5sms" event={"ID":"77ed04c3-9bf0-4ff4-81b7-5844412f66ca","Type":"ContainerDied","Data":"373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8"} Dec 09 15:33:34 crc kubenswrapper[4716]: I1209 15:33:34.156515 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerStarted","Data":"4134efb47f3e03f54ffc809ce1964c0941b0f7fccea982956d4a002a1dfc152e"} Dec 09 15:33:34 crc kubenswrapper[4716]: I1209 15:33:34.156868 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:33:34 crc kubenswrapper[4716]: I1209 15:33:34.184718 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.353825594 podStartE2EDuration="7.184687805s" podCreationTimestamp="2025-12-09 15:33:27 +0000 UTC" firstStartedPulling="2025-12-09 15:33:29.067876322 +0000 UTC m=+1496.222620300" lastFinishedPulling="2025-12-09 15:33:32.898738523 +0000 UTC m=+1500.053482511" observedRunningTime="2025-12-09 15:33:34.177021144 +0000 UTC m=+1501.331765132" watchObservedRunningTime="2025-12-09 15:33:34.184687805 +0000 UTC m=+1501.339431803" Dec 09 15:33:34 crc kubenswrapper[4716]: I1209 15:33:34.883457 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:34 crc kubenswrapper[4716]: I1209 15:33:34.973326 4716 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/placement-864d657bcd-fb9jd" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.017336 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5464d7ccfb-6g4nl" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:38466->10.217.0.196:9311: read: connection reset by peer" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.017367 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5464d7ccfb-6g4nl" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.196:9311/healthcheck\": read tcp 10.217.0.2:38480->10.217.0.196:9311: read: connection reset by peer" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.195525 4716 generic.go:334] "Generic (PLEG): container finished" podID="fb352458-dad5-4f43-898f-561101aec09f" containerID="842cb7525b09e1ede5bad017c8fc6a540e2a04c8ca66311592ddb0a7c05dfaa9" exitCode=0 Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.196271 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5464d7ccfb-6g4nl" event={"ID":"fb352458-dad5-4f43-898f-561101aec09f","Type":"ContainerDied","Data":"842cb7525b09e1ede5bad017c8fc6a540e2a04c8ca66311592ddb0a7c05dfaa9"} Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.656395 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.749261 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.785783 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcbpv\" (UniqueName: \"kubernetes.io/projected/fb352458-dad5-4f43-898f-561101aec09f-kube-api-access-hcbpv\") pod \"fb352458-dad5-4f43-898f-561101aec09f\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.786959 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data-custom\") pod \"fb352458-dad5-4f43-898f-561101aec09f\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.787055 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb352458-dad5-4f43-898f-561101aec09f-logs\") pod \"fb352458-dad5-4f43-898f-561101aec09f\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.787139 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data\") pod \"fb352458-dad5-4f43-898f-561101aec09f\" (UID: \"fb352458-dad5-4f43-898f-561101aec09f\") " Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.787215 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-combined-ca-bundle\") pod \"fb352458-dad5-4f43-898f-561101aec09f\" (UID: 
\"fb352458-dad5-4f43-898f-561101aec09f\") " Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.789367 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb352458-dad5-4f43-898f-561101aec09f-logs" (OuterVolumeSpecName: "logs") pod "fb352458-dad5-4f43-898f-561101aec09f" (UID: "fb352458-dad5-4f43-898f-561101aec09f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.796798 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb352458-dad5-4f43-898f-561101aec09f-kube-api-access-hcbpv" (OuterVolumeSpecName: "kube-api-access-hcbpv") pod "fb352458-dad5-4f43-898f-561101aec09f" (UID: "fb352458-dad5-4f43-898f-561101aec09f"). InnerVolumeSpecName "kube-api-access-hcbpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.797776 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fb352458-dad5-4f43-898f-561101aec09f" (UID: "fb352458-dad5-4f43-898f-561101aec09f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.855598 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-vpmgn"] Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.856139 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" containerName="dnsmasq-dns" containerID="cri-o://ff5801a0ee109df3d8e18d69f975de05311276d70c002727c081a6e573653c8b" gracePeriod=10 Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.871347 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb352458-dad5-4f43-898f-561101aec09f" (UID: "fb352458-dad5-4f43-898f-561101aec09f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.891461 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb352458-dad5-4f43-898f-561101aec09f-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.891506 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.891522 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcbpv\" (UniqueName: \"kubernetes.io/projected/fb352458-dad5-4f43-898f-561101aec09f-kube-api-access-hcbpv\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.891533 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.920106 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data" (OuterVolumeSpecName: "config-data") pod "fb352458-dad5-4f43-898f-561101aec09f" (UID: "fb352458-dad5-4f43-898f-561101aec09f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:35 crc kubenswrapper[4716]: I1209 15:33:35.994369 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb352458-dad5-4f43-898f-561101aec09f-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.160880 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.236031 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5464d7ccfb-6g4nl" event={"ID":"fb352458-dad5-4f43-898f-561101aec09f","Type":"ContainerDied","Data":"e7bb940f420160c5127a1f00d059b713c9af8a4012b89b8571fd3af9896ce4e0"} Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.236785 4716 scope.go:117] "RemoveContainer" containerID="842cb7525b09e1ede5bad017c8fc6a540e2a04c8ca66311592ddb0a7c05dfaa9" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.236949 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5464d7ccfb-6g4nl" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.244072 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.245331 4716 generic.go:334] "Generic (PLEG): container finished" podID="3dc5901d-b785-4857-928c-9d3147d4f412" containerID="ff5801a0ee109df3d8e18d69f975de05311276d70c002727c081a6e573653c8b" exitCode=0 Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.246095 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="probe" containerID="cri-o://e8635e9db23f0cde693a5016caf8ca0e35ea31e851332ac8b6276d13cc759bbf" gracePeriod=30 Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.246323 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" event={"ID":"3dc5901d-b785-4857-928c-9d3147d4f412","Type":"ContainerDied","Data":"ff5801a0ee109df3d8e18d69f975de05311276d70c002727c081a6e573653c8b"} Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.246607 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="cinder-scheduler" containerID="cri-o://4fc959434f39bc355ffb101e4598cef91cdfdd3fa0ebaa3373e9e212d78d3ad3" gracePeriod=30 Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.294613 4716 scope.go:117] "RemoveContainer" containerID="71e4bc3fcad27e9034f797c0f546857b8fe5d6f75a9cf4cd4c6b1df9b5609bce" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.334797 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5464d7ccfb-6g4nl"] Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.365806 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5464d7ccfb-6g4nl"] Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.591477 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.625109 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58k7z\" (UniqueName: \"kubernetes.io/projected/3dc5901d-b785-4857-928c-9d3147d4f412-kube-api-access-58k7z\") pod \"3dc5901d-b785-4857-928c-9d3147d4f412\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.625470 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-nb\") pod \"3dc5901d-b785-4857-928c-9d3147d4f412\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.626539 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-swift-storage-0\") pod \"3dc5901d-b785-4857-928c-9d3147d4f412\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.626677 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-config\") pod \"3dc5901d-b785-4857-928c-9d3147d4f412\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.626813 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-sb\") pod \"3dc5901d-b785-4857-928c-9d3147d4f412\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.626998 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-svc\") pod \"3dc5901d-b785-4857-928c-9d3147d4f412\" (UID: \"3dc5901d-b785-4857-928c-9d3147d4f412\") " Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.633178 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dc5901d-b785-4857-928c-9d3147d4f412-kube-api-access-58k7z" (OuterVolumeSpecName: "kube-api-access-58k7z") pod "3dc5901d-b785-4857-928c-9d3147d4f412" (UID: "3dc5901d-b785-4857-928c-9d3147d4f412"). InnerVolumeSpecName "kube-api-access-58k7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.696404 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3dc5901d-b785-4857-928c-9d3147d4f412" (UID: "3dc5901d-b785-4857-928c-9d3147d4f412"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.730306 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58k7z\" (UniqueName: \"kubernetes.io/projected/3dc5901d-b785-4857-928c-9d3147d4f412-kube-api-access-58k7z\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.730344 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.751096 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-config" (OuterVolumeSpecName: "config") pod "3dc5901d-b785-4857-928c-9d3147d4f412" (UID: "3dc5901d-b785-4857-928c-9d3147d4f412"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.786171 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3dc5901d-b785-4857-928c-9d3147d4f412" (UID: "3dc5901d-b785-4857-928c-9d3147d4f412"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.824341 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3dc5901d-b785-4857-928c-9d3147d4f412" (UID: "3dc5901d-b785-4857-928c-9d3147d4f412"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.838519 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.838563 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.838578 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.867316 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3dc5901d-b785-4857-928c-9d3147d4f412" (UID: "3dc5901d-b785-4857-928c-9d3147d4f412"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:36 crc kubenswrapper[4716]: I1209 15:33:36.940848 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3dc5901d-b785-4857-928c-9d3147d4f412-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.226450 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb352458-dad5-4f43-898f-561101aec09f" path="/var/lib/kubelet/pods/fb352458-dad5-4f43-898f-561101aec09f/volumes" Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.262088 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" event={"ID":"3dc5901d-b785-4857-928c-9d3147d4f412","Type":"ContainerDied","Data":"057b92c6ba02e9dd482616f7b1b0934d1fde4d8f404d5847e643d5543d0d8326"} Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.262139 4716 scope.go:117] "RemoveContainer" containerID="ff5801a0ee109df3d8e18d69f975de05311276d70c002727c081a6e573653c8b" Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.262231 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-vpmgn" Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.294601 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-vpmgn"] Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.301677 4716 scope.go:117] "RemoveContainer" containerID="22442207d911f56244b5151f12194e43208847e892395f9486fb81ac851ade4d" Dec 09 15:33:37 crc kubenswrapper[4716]: I1209 15:33:37.315578 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-vpmgn"] Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.297450 4716 generic.go:334] "Generic (PLEG): container finished" podID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerID="e8635e9db23f0cde693a5016caf8ca0e35ea31e851332ac8b6276d13cc759bbf" exitCode=0 Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.297913 4716 generic.go:334] "Generic (PLEG): container finished" podID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerID="4fc959434f39bc355ffb101e4598cef91cdfdd3fa0ebaa3373e9e212d78d3ad3" exitCode=0 Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.297948 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"cb788ecd-b3f5-4bbc-a888-4962c8d3a106","Type":"ContainerDied","Data":"e8635e9db23f0cde693a5016caf8ca0e35ea31e851332ac8b6276d13cc759bbf"} Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.297994 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"cb788ecd-b3f5-4bbc-a888-4962c8d3a106","Type":"ContainerDied","Data":"4fc959434f39bc355ffb101e4598cef91cdfdd3fa0ebaa3373e9e212d78d3ad3"} Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.588348 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.677545 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data\") pod \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.677818 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-etc-machine-id\") pod \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.677926 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data-custom\") pod \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.677973 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-scripts\") pod \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.677996 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kw89\" (UniqueName: \"kubernetes.io/projected/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-kube-api-access-9kw89\") pod \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.678021 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-combined-ca-bundle\") pod \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\" (UID: \"cb788ecd-b3f5-4bbc-a888-4962c8d3a106\") " Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.679733 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "cb788ecd-b3f5-4bbc-a888-4962c8d3a106" (UID: "cb788ecd-b3f5-4bbc-a888-4962c8d3a106"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.687573 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-kube-api-access-9kw89" (OuterVolumeSpecName: "kube-api-access-9kw89") pod "cb788ecd-b3f5-4bbc-a888-4962c8d3a106" (UID: "cb788ecd-b3f5-4bbc-a888-4962c8d3a106"). InnerVolumeSpecName "kube-api-access-9kw89". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.690129 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cb788ecd-b3f5-4bbc-a888-4962c8d3a106" (UID: "cb788ecd-b3f5-4bbc-a888-4962c8d3a106"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.690328 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-scripts" (OuterVolumeSpecName: "scripts") pod "cb788ecd-b3f5-4bbc-a888-4962c8d3a106" (UID: "cb788ecd-b3f5-4bbc-a888-4962c8d3a106"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.766354 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb788ecd-b3f5-4bbc-a888-4962c8d3a106" (UID: "cb788ecd-b3f5-4bbc-a888-4962c8d3a106"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.781316 4716 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.781354 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.781369 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.781381 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kw89\" (UniqueName: \"kubernetes.io/projected/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-kube-api-access-9kw89\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.781395 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.812991 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data" (OuterVolumeSpecName: "config-data") pod "cb788ecd-b3f5-4bbc-a888-4962c8d3a106" (UID: "cb788ecd-b3f5-4bbc-a888-4962c8d3a106"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:38 crc kubenswrapper[4716]: I1209 15:33:38.884073 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb788ecd-b3f5-4bbc-a888-4962c8d3a106-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.113144 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ed04c3_9bf0_4ff4_81b7_5844412f66ca.slice/crio-conmon-24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77ed04c3_9bf0_4ff4_81b7_5844412f66ca.slice/crio-24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.228464 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" path="/var/lib/kubelet/pods/3dc5901d-b785-4857-928c-9d3147d4f412/volumes" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.284318 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.301107 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.349430 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.349551 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"cb788ecd-b3f5-4bbc-a888-4962c8d3a106","Type":"ContainerDied","Data":"8a8ea922a14cf30b6f763c4fe87af526887ecad83bf45a94f873cc6d9c102f04"} Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.349604 4716 scope.go:117] "RemoveContainer" containerID="e8635e9db23f0cde693a5016caf8ca0e35ea31e851332ac8b6276d13cc759bbf" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.399770 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-httpd-config\") pod \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.399940 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-config\") pod \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.400029 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-ovndb-tls-certs\") pod \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.400094 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qknbb\" (UniqueName: \"kubernetes.io/projected/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-kube-api-access-qknbb\") pod 
\"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.400278 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-combined-ca-bundle\") pod \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\" (UID: \"77ed04c3-9bf0-4ff4-81b7-5844412f66ca\") " Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.404999 4716 generic.go:334] "Generic (PLEG): container finished" podID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerID="24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24" exitCode=0 Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.405052 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f86fdcb78-t5sms" event={"ID":"77ed04c3-9bf0-4ff4-81b7-5844412f66ca","Type":"ContainerDied","Data":"24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24"} Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.405082 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-f86fdcb78-t5sms" event={"ID":"77ed04c3-9bf0-4ff4-81b7-5844412f66ca","Type":"ContainerDied","Data":"adf3f2aff71e6f37745e3022e3915edd0c04a9c35a07710820aaaf2a9e6232a5"} Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.405151 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-f86fdcb78-t5sms" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.418898 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "77ed04c3-9bf0-4ff4-81b7-5844412f66ca" (UID: "77ed04c3-9bf0-4ff4-81b7-5844412f66ca"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.420280 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-kube-api-access-qknbb" (OuterVolumeSpecName: "kube-api-access-qknbb") pod "77ed04c3-9bf0-4ff4-81b7-5844412f66ca" (UID: "77ed04c3-9bf0-4ff4-81b7-5844412f66ca"). InnerVolumeSpecName "kube-api-access-qknbb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.452672 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.453850 4716 scope.go:117] "RemoveContainer" containerID="4fc959434f39bc355ffb101e4598cef91cdfdd3fa0ebaa3373e9e212d78d3ad3" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.500943 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.506120 4716 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.506162 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qknbb\" (UniqueName: \"kubernetes.io/projected/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-kube-api-access-qknbb\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.540466 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-config" (OuterVolumeSpecName: "config") pod "77ed04c3-9bf0-4ff4-81b7-5844412f66ca" (UID: "77ed04c3-9bf0-4ff4-81b7-5844412f66ca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.547538 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548143 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-httpd" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548165 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-httpd" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548199 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" containerName="init" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548206 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" containerName="init" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548231 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548240 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548251 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api-log" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548257 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api-log" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548263 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-api" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548269 4716 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-api" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548280 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" containerName="dnsmasq-dns" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548287 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" containerName="dnsmasq-dns" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548297 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="cinder-scheduler" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548304 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="cinder-scheduler" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.548316 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="probe" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548321 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="probe" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548587 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="cinder-scheduler" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548609 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dc5901d-b785-4857-928c-9d3147d4f412" containerName="dnsmasq-dns" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548657 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api-log" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548666 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-httpd" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548681 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb352458-dad5-4f43-898f-561101aec09f" containerName="barbican-api" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548693 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" containerName="neutron-api" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.548703 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" containerName="probe" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.551080 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.560513 4716 scope.go:117] "RemoveContainer" containerID="373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.562405 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.585254 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613258 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613361 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-scripts\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613401 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwgh7\" (UniqueName: \"kubernetes.io/projected/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-kube-api-access-pwgh7\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613452 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613529 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613572 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-config-data\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.613873 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.637489 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77ed04c3-9bf0-4ff4-81b7-5844412f66ca" (UID: "77ed04c3-9bf0-4ff4-81b7-5844412f66ca"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.637945 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "77ed04c3-9bf0-4ff4-81b7-5844412f66ca" (UID: "77ed04c3-9bf0-4ff4-81b7-5844412f66ca"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715289 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715352 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-scripts\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715378 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwgh7\" (UniqueName: \"kubernetes.io/projected/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-kube-api-access-pwgh7\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715427 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715471 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715501 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-config-data\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715743 4716 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.715769 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77ed04c3-9bf0-4ff4-81b7-5844412f66ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.716869 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: 
\"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.720108 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-config-data\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.720553 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-scripts\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.723517 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.724267 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.729586 4716 scope.go:117] "RemoveContainer" containerID="24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.739252 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwgh7\" (UniqueName: \"kubernetes.io/projected/2343bb60-69e9-4d0d-95d3-0ecdfa36d42f-kube-api-access-pwgh7\") pod \"cinder-scheduler-0\" (UID: \"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f\") " pod="openstack/cinder-scheduler-0" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.800222 4716 scope.go:117] "RemoveContainer" containerID="373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.800908 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8\": container with ID starting with 373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8 not found: ID does not exist" containerID="373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.800973 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8"} err="failed to get container status \"373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8\": rpc error: code = NotFound desc = could not find container \"373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8\": container with ID starting with 373447bd50d24d9ebc4b475510aa6327a4545e4a75fd0031bcc813ddcbd7edb8 not found: ID does not exist" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.801008 4716 scope.go:117] "RemoveContainer" containerID="24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24" Dec 09 15:33:39 crc kubenswrapper[4716]: E1209 15:33:39.802545 4716 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24\": container with ID starting with 24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24 not found: ID does not exist" containerID="24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.802589 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24"} err="failed to get container status \"24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24\": rpc error: code = NotFound desc = could not find container \"24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24\": container with ID starting with 24b6becee0a78737857fbbcf37f65c5366fad7647320ca9e776989f7cc524b24 not found: ID does not exist" Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.810196 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-f86fdcb78-t5sms"] Dec 09 15:33:39 crc kubenswrapper[4716]: I1209 15:33:39.821544 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-f86fdcb78-t5sms"] Dec 09 15:33:40 crc kubenswrapper[4716]: I1209 15:33:40.031139 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 09 15:33:40 crc kubenswrapper[4716]: I1209 15:33:40.637789 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 09 15:33:40 crc kubenswrapper[4716]: W1209 15:33:40.638339 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2343bb60_69e9_4d0d_95d3_0ecdfa36d42f.slice/crio-2d9379c11c9a8937c7eaa198d499a1ab0383ef2b43054a5ef88fffadc773010c WatchSource:0}: Error finding container 2d9379c11c9a8937c7eaa198d499a1ab0383ef2b43054a5ef88fffadc773010c: Status 404 returned error can't find the container with id 2d9379c11c9a8937c7eaa198d499a1ab0383ef2b43054a5ef88fffadc773010c Dec 09 15:33:41 crc kubenswrapper[4716]: I1209 15:33:41.228309 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77ed04c3-9bf0-4ff4-81b7-5844412f66ca" path="/var/lib/kubelet/pods/77ed04c3-9bf0-4ff4-81b7-5844412f66ca/volumes" Dec 09 15:33:41 crc kubenswrapper[4716]: I1209 15:33:41.229689 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb788ecd-b3f5-4bbc-a888-4962c8d3a106" path="/var/lib/kubelet/pods/cb788ecd-b3f5-4bbc-a888-4962c8d3a106/volumes" Dec 09 15:33:41 crc kubenswrapper[4716]: I1209 15:33:41.463305 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f","Type":"ContainerStarted","Data":"0e286ebcd0e0a867d4ddd380d09ee5b53ecad9d964a855b38736c3d5b0d26f6a"} Dec 09 15:33:41 crc kubenswrapper[4716]: I1209 15:33:41.463783 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f","Type":"ContainerStarted","Data":"2d9379c11c9a8937c7eaa198d499a1ab0383ef2b43054a5ef88fffadc773010c"} Dec 09 15:33:42 crc kubenswrapper[4716]: I1209 15:33:42.028588 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-56fcdcb665-5z7hm" Dec 09 15:33:42 crc kubenswrapper[4716]: I1209 15:33:42.476446 4716 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2343bb60-69e9-4d0d-95d3-0ecdfa36d42f","Type":"ContainerStarted","Data":"8f1e52505cc5d44b9611d1eef0008ec0017d20fafa5d7b6da89fb6fd47d48b8e"} Dec 09 15:33:42 crc kubenswrapper[4716]: I1209 15:33:42.525680 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.525650882 podStartE2EDuration="3.525650882s" podCreationTimestamp="2025-12-09 15:33:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:42.497497081 +0000 UTC m=+1509.652241069" watchObservedRunningTime="2025-12-09 15:33:42.525650882 +0000 UTC m=+1509.680394880" Dec 09 15:33:43 crc kubenswrapper[4716]: I1209 15:33:43.938365 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-85qhw"] Dec 09 15:33:43 crc kubenswrapper[4716]: I1209 15:33:43.943067 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:43 crc kubenswrapper[4716]: I1209 15:33:43.960661 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-85qhw"] Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.064148 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhqm8\" (UniqueName: \"kubernetes.io/projected/1075c22d-76f2-4c48-86b1-23374040e152-kube-api-access-lhqm8\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.064768 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-utilities\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.064942 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-catalog-content\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.167265 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-utilities\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.167349 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-catalog-content\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.167430 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhqm8\" (UniqueName: 
\"kubernetes.io/projected/1075c22d-76f2-4c48-86b1-23374040e152-kube-api-access-lhqm8\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.168074 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-catalog-content\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.168411 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-utilities\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.201127 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhqm8\" (UniqueName: \"kubernetes.io/projected/1075c22d-76f2-4c48-86b1-23374040e152-kube-api-access-lhqm8\") pod \"redhat-marketplace-85qhw\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.275695 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:44 crc kubenswrapper[4716]: I1209 15:33:44.829445 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-85qhw"] Dec 09 15:33:44 crc kubenswrapper[4716]: W1209 15:33:44.830055 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1075c22d_76f2_4c48_86b1_23374040e152.slice/crio-a5ed2bf6049f87bb749ae145d30495ab36e33d5185e584e9d06b05dcfd7f53f6 WatchSource:0}: Error finding container a5ed2bf6049f87bb749ae145d30495ab36e33d5185e584e9d06b05dcfd7f53f6: Status 404 returned error can't find the container with id a5ed2bf6049f87bb749ae145d30495ab36e33d5185e584e9d06b05dcfd7f53f6 Dec 09 15:33:45 crc kubenswrapper[4716]: I1209 15:33:45.031902 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 09 15:33:45 crc kubenswrapper[4716]: I1209 15:33:45.517293 4716 generic.go:334] "Generic (PLEG): container finished" podID="1075c22d-76f2-4c48-86b1-23374040e152" containerID="b0a371fe82d3346a610e18cf8290d8c22464d5461287bf72afad2223124c62dd" exitCode=0 Dec 09 15:33:45 crc kubenswrapper[4716]: I1209 15:33:45.517336 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85qhw" event={"ID":"1075c22d-76f2-4c48-86b1-23374040e152","Type":"ContainerDied","Data":"b0a371fe82d3346a610e18cf8290d8c22464d5461287bf72afad2223124c62dd"} Dec 09 15:33:45 crc kubenswrapper[4716]: I1209 15:33:45.517363 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85qhw" event={"ID":"1075c22d-76f2-4c48-86b1-23374040e152","Type":"ContainerStarted","Data":"a5ed2bf6049f87bb749ae145d30495ab36e33d5185e584e9d06b05dcfd7f53f6"} Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.528956 4716 generic.go:334] "Generic (PLEG): container finished" podID="1075c22d-76f2-4c48-86b1-23374040e152" 
containerID="222741b4681273d5f47996bdc2d3441de8db0ad698b35edc2337fcba1fd7dddd" exitCode=0 Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.529104 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85qhw" event={"ID":"1075c22d-76f2-4c48-86b1-23374040e152","Type":"ContainerDied","Data":"222741b4681273d5f47996bdc2d3441de8db0ad698b35edc2337fcba1fd7dddd"} Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.812607 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.814203 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.825798 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.828003 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-p6cs6" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.829002 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.864913 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.952579 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config-secret\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.952782 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.952943 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrc27\" (UniqueName: \"kubernetes.io/projected/21e53b9e-d9f0-468d-a35b-a961ad8dee41-kube-api-access-jrc27\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:46 crc kubenswrapper[4716]: I1209 15:33:46.952990 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.055131 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.055290 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.055315 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrc27\" (UniqueName: \"kubernetes.io/projected/21e53b9e-d9f0-468d-a35b-a961ad8dee41-kube-api-access-jrc27\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.055390 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config-secret\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.059167 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.068925 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.071135 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config-secret\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.115001 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrc27\" (UniqueName: \"kubernetes.io/projected/21e53b9e-d9f0-468d-a35b-a961ad8dee41-kube-api-access-jrc27\") pod \"openstackclient\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.143468 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.297697 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.345375 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.389706 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.391454 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.408538 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.468697 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7t4q\" (UniqueName: \"kubernetes.io/projected/90ad0d77-c429-467b-a32d-46be1ccd1c9b-kube-api-access-j7t4q\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.470002 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/90ad0d77-c429-467b-a32d-46be1ccd1c9b-openstack-config\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.470445 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/90ad0d77-c429-467b-a32d-46be1ccd1c9b-openstack-config-secret\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.470544 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90ad0d77-c429-467b-a32d-46be1ccd1c9b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.545030 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85qhw" event={"ID":"1075c22d-76f2-4c48-86b1-23374040e152","Type":"ContainerStarted","Data":"a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7"} Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.571787 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-85qhw" podStartSLOduration=3.151255626 podStartE2EDuration="4.571767277s" podCreationTimestamp="2025-12-09 15:33:43 +0000 UTC" firstStartedPulling="2025-12-09 15:33:45.519257812 +0000 UTC m=+1512.674001800" lastFinishedPulling="2025-12-09 15:33:46.939769463 +0000 UTC m=+1514.094513451" observedRunningTime="2025-12-09 15:33:47.567941457 +0000 UTC m=+1514.722685435" watchObservedRunningTime="2025-12-09 15:33:47.571767277 +0000 UTC m=+1514.726511265" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.572298 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/90ad0d77-c429-467b-a32d-46be1ccd1c9b-openstack-config-secret\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.572386 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90ad0d77-c429-467b-a32d-46be1ccd1c9b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.572458 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7t4q\" (UniqueName: \"kubernetes.io/projected/90ad0d77-c429-467b-a32d-46be1ccd1c9b-kube-api-access-j7t4q\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.572491 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/90ad0d77-c429-467b-a32d-46be1ccd1c9b-openstack-config\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.573615 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/90ad0d77-c429-467b-a32d-46be1ccd1c9b-openstack-config\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.585602 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/90ad0d77-c429-467b-a32d-46be1ccd1c9b-openstack-config-secret\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.599024 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7t4q\" (UniqueName: \"kubernetes.io/projected/90ad0d77-c429-467b-a32d-46be1ccd1c9b-kube-api-access-j7t4q\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.616883 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90ad0d77-c429-467b-a32d-46be1ccd1c9b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"90ad0d77-c429-467b-a32d-46be1ccd1c9b\") " pod="openstack/openstackclient" Dec 09 15:33:47 crc kubenswrapper[4716]: I1209 15:33:47.717178 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.374005 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.528741 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7ff4fc547f-x4v24"] Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.560246 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.567904 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.568416 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.573830 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.607479 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7ff4fc547f-x4v24"] Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.624839 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d12a5bad-2a74-4efa-9db3-796dde581bc7-run-httpd\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.624986 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz9ws\" (UniqueName: \"kubernetes.io/projected/d12a5bad-2a74-4efa-9db3-796dde581bc7-kube-api-access-bz9ws\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.628139 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-public-tls-certs\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.628300 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-combined-ca-bundle\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.628364 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-config-data\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.628425 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-internal-tls-certs\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.628699 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d12a5bad-2a74-4efa-9db3-796dde581bc7-etc-swift\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " 
pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.628799 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d12a5bad-2a74-4efa-9db3-796dde581bc7-log-httpd\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.643864 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"90ad0d77-c429-467b-a32d-46be1ccd1c9b","Type":"ContainerStarted","Data":"634e70d43c63bd426a7142ef9f08005862d318749614bd99333e1679ec3b36be"} Dec 09 15:33:48 crc kubenswrapper[4716]: E1209 15:33:48.663822 4716 log.go:32] "RunPodSandbox from runtime service failed" err=< Dec 09 15:33:48 crc kubenswrapper[4716]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_21e53b9e-d9f0-468d-a35b-a961ad8dee41_0(d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6" Netns:"/var/run/netns/9ff660f6-c534-41e7-bb42-9cefcfdf236f" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6;K8S_POD_UID=21e53b9e-d9f0-468d-a35b-a961ad8dee41" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: [openstack/openstackclient/21e53b9e-d9f0-468d-a35b-a961ad8dee41:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/openstackclient d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6 network default NAD default] [openstack/openstackclient d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6 network default NAD default] failed to configure pod interface: canceled old pod sandbox waiting for OVS port binding for 0a:58:0a:d9:00:cc [10.217.0.204/23] Dec 09 15:33:48 crc kubenswrapper[4716]: ' Dec 09 15:33:48 crc kubenswrapper[4716]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 09 15:33:48 crc kubenswrapper[4716]: > Dec 09 15:33:48 crc kubenswrapper[4716]: E1209 15:33:48.664195 4716 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Dec 09 15:33:48 crc kubenswrapper[4716]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_21e53b9e-d9f0-468d-a35b-a961ad8dee41_0(d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6" Netns:"/var/run/netns/9ff660f6-c534-41e7-bb42-9cefcfdf236f" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6;K8S_POD_UID=21e53b9e-d9f0-468d-a35b-a961ad8dee41" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: [openstack/openstackclient/21e53b9e-d9f0-468d-a35b-a961ad8dee41:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/openstackclient d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6 network default NAD default] [openstack/openstackclient d5deb85fb6f01374f852aadc7bcacc15cded7cae3712e919bdb251937b238bc6 network default NAD default] failed to configure pod interface: canceled old pod sandbox waiting for OVS port binding for 0a:58:0a:d9:00:cc [10.217.0.204/23] Dec 09 15:33:48 crc kubenswrapper[4716]: ' Dec 09 15:33:48 crc kubenswrapper[4716]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Dec 09 15:33:48 crc kubenswrapper[4716]: > pod="openstack/openstackclient" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738319 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bz9ws\" (UniqueName: \"kubernetes.io/projected/d12a5bad-2a74-4efa-9db3-796dde581bc7-kube-api-access-bz9ws\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738394 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-public-tls-certs\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738429 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-combined-ca-bundle\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738460 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-config-data\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738499 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-internal-tls-certs\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738552 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/d12a5bad-2a74-4efa-9db3-796dde581bc7-etc-swift\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738581 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d12a5bad-2a74-4efa-9db3-796dde581bc7-log-httpd\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.738664 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d12a5bad-2a74-4efa-9db3-796dde581bc7-run-httpd\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.739574 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d12a5bad-2a74-4efa-9db3-796dde581bc7-log-httpd\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.739659 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d12a5bad-2a74-4efa-9db3-796dde581bc7-run-httpd\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.754092 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-config-data\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.754691 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d12a5bad-2a74-4efa-9db3-796dde581bc7-etc-swift\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.756208 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-combined-ca-bundle\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.759243 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-internal-tls-certs\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.759780 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12a5bad-2a74-4efa-9db3-796dde581bc7-public-tls-certs\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: 
\"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.771002 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bz9ws\" (UniqueName: \"kubernetes.io/projected/d12a5bad-2a74-4efa-9db3-796dde581bc7-kube-api-access-bz9ws\") pod \"swift-proxy-7ff4fc547f-x4v24\" (UID: \"d12a5bad-2a74-4efa-9db3-796dde581bc7\") " pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:48 crc kubenswrapper[4716]: I1209 15:33:48.917878 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.658516 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.676198 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.679475 4716 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="21e53b9e-d9f0-468d-a35b-a961ad8dee41" podUID="90ad0d77-c429-467b-a32d-46be1ccd1c9b" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.758785 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7ff4fc547f-x4v24"] Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.763698 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config-secret\") pod \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.763750 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrc27\" (UniqueName: \"kubernetes.io/projected/21e53b9e-d9f0-468d-a35b-a961ad8dee41-kube-api-access-jrc27\") pod \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.764047 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-combined-ca-bundle\") pod \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.764132 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config\") pod \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\" (UID: \"21e53b9e-d9f0-468d-a35b-a961ad8dee41\") " Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.765460 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "21e53b9e-d9f0-468d-a35b-a961ad8dee41" (UID: "21e53b9e-d9f0-468d-a35b-a961ad8dee41"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.770351 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "21e53b9e-d9f0-468d-a35b-a961ad8dee41" (UID: "21e53b9e-d9f0-468d-a35b-a961ad8dee41"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.773708 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21e53b9e-d9f0-468d-a35b-a961ad8dee41" (UID: "21e53b9e-d9f0-468d-a35b-a961ad8dee41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.773784 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21e53b9e-d9f0-468d-a35b-a961ad8dee41-kube-api-access-jrc27" (OuterVolumeSpecName: "kube-api-access-jrc27") pod "21e53b9e-d9f0-468d-a35b-a961ad8dee41" (UID: "21e53b9e-d9f0-468d-a35b-a961ad8dee41"). InnerVolumeSpecName "kube-api-access-jrc27". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:49 crc kubenswrapper[4716]: W1209 15:33:49.779991 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd12a5bad_2a74_4efa_9db3_796dde581bc7.slice/crio-a2b7d856f6625e1b35167f4601bbad49a362f623136317c7bb36b73e1842e68d WatchSource:0}: Error finding container a2b7d856f6625e1b35167f4601bbad49a362f623136317c7bb36b73e1842e68d: Status 404 returned error can't find the container with id a2b7d856f6625e1b35167f4601bbad49a362f623136317c7bb36b73e1842e68d Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.867076 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.867113 4716 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.867124 4716 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21e53b9e-d9f0-468d-a35b-a961ad8dee41-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:49 crc kubenswrapper[4716]: I1209 15:33:49.867138 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrc27\" (UniqueName: \"kubernetes.io/projected/21e53b9e-d9f0-468d-a35b-a961ad8dee41-kube-api-access-jrc27\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:50 crc kubenswrapper[4716]: I1209 15:33:50.635131 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 09 15:33:50 crc kubenswrapper[4716]: I1209 15:33:50.685308 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Dec 09 15:33:50 crc kubenswrapper[4716]: I1209 15:33:50.688988 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ff4fc547f-x4v24" event={"ID":"d12a5bad-2a74-4efa-9db3-796dde581bc7","Type":"ContainerStarted","Data":"b194e0e27e7087ce8da6b5d20b48408e7d82422c2da59e80cdf054c952cf9901"} Dec 09 15:33:50 crc kubenswrapper[4716]: I1209 15:33:50.689034 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ff4fc547f-x4v24" event={"ID":"d12a5bad-2a74-4efa-9db3-796dde581bc7","Type":"ContainerStarted","Data":"a2b7d856f6625e1b35167f4601bbad49a362f623136317c7bb36b73e1842e68d"} Dec 09 15:33:50 crc kubenswrapper[4716]: I1209 15:33:50.718098 4716 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="21e53b9e-d9f0-468d-a35b-a961ad8dee41" podUID="90ad0d77-c429-467b-a32d-46be1ccd1c9b" Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.229761 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21e53b9e-d9f0-468d-a35b-a961ad8dee41" path="/var/lib/kubelet/pods/21e53b9e-d9f0-468d-a35b-a961ad8dee41/volumes" Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.625451 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.626146 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-central-agent" containerID="cri-o://a1d3b3a81b668ef739d227b362886584f776dcca3b811a4c7b5a2c259ec330b2" gracePeriod=30 Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.626225 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="proxy-httpd" containerID="cri-o://4134efb47f3e03f54ffc809ce1964c0941b0f7fccea982956d4a002a1dfc152e" gracePeriod=30 Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.626233 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="sg-core" containerID="cri-o://4ca96384d533b03a05a7480a64c81fe0f0f3605373f37166698ac991432ab754" gracePeriod=30 Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.626310 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-notification-agent" containerID="cri-o://7a59b8aa87fb92ce4a8dcc3201948518059b9a27226748d843452d660b84f71d" gracePeriod=30 Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.656255 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.701419 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ff4fc547f-x4v24" event={"ID":"d12a5bad-2a74-4efa-9db3-796dde581bc7","Type":"ContainerStarted","Data":"2fdcac7098973a60f8d9162df07eccc5f8bd006a55a919ca35e6ca9c4b8a290e"} Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.702398 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 
15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.702445 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:51 crc kubenswrapper[4716]: I1209 15:33:51.728141 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7ff4fc547f-x4v24" podStartSLOduration=3.728115199 podStartE2EDuration="3.728115199s" podCreationTimestamp="2025-12-09 15:33:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:33:51.725517454 +0000 UTC m=+1518.880261442" watchObservedRunningTime="2025-12-09 15:33:51.728115199 +0000 UTC m=+1518.882859187" Dec 09 15:33:52 crc kubenswrapper[4716]: I1209 15:33:52.737790 4716 generic.go:334] "Generic (PLEG): container finished" podID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerID="4134efb47f3e03f54ffc809ce1964c0941b0f7fccea982956d4a002a1dfc152e" exitCode=0 Dec 09 15:33:52 crc kubenswrapper[4716]: I1209 15:33:52.738143 4716 generic.go:334] "Generic (PLEG): container finished" podID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerID="4ca96384d533b03a05a7480a64c81fe0f0f3605373f37166698ac991432ab754" exitCode=2 Dec 09 15:33:52 crc kubenswrapper[4716]: I1209 15:33:52.738153 4716 generic.go:334] "Generic (PLEG): container finished" podID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerID="a1d3b3a81b668ef739d227b362886584f776dcca3b811a4c7b5a2c259ec330b2" exitCode=0 Dec 09 15:33:52 crc kubenswrapper[4716]: I1209 15:33:52.739467 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerDied","Data":"4134efb47f3e03f54ffc809ce1964c0941b0f7fccea982956d4a002a1dfc152e"} Dec 09 15:33:52 crc kubenswrapper[4716]: I1209 15:33:52.739513 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerDied","Data":"4ca96384d533b03a05a7480a64c81fe0f0f3605373f37166698ac991432ab754"} Dec 09 15:33:52 crc kubenswrapper[4716]: I1209 15:33:52.739528 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerDied","Data":"a1d3b3a81b668ef739d227b362886584f776dcca3b811a4c7b5a2c259ec330b2"} Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.276641 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.277023 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.349267 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.783823 4716 generic.go:334] "Generic (PLEG): container finished" podID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerID="7a59b8aa87fb92ce4a8dcc3201948518059b9a27226748d843452d660b84f71d" exitCode=0 Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.784422 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerDied","Data":"7a59b8aa87fb92ce4a8dcc3201948518059b9a27226748d843452d660b84f71d"} Dec 09 15:33:54 crc 
kubenswrapper[4716]: I1209 15:33:54.877226 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.936933 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-85qhw"] Dec 09 15:33:54 crc kubenswrapper[4716]: I1209 15:33:54.994353 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044459 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9w8g2\" (UniqueName: \"kubernetes.io/projected/04ee94e7-2ee0-4fbb-9145-22effa8ab009-kube-api-access-9w8g2\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044590 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-scripts\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044671 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-sg-core-conf-yaml\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044778 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-combined-ca-bundle\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044851 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-log-httpd\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044919 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-config-data\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.044976 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-run-httpd\") pod \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\" (UID: \"04ee94e7-2ee0-4fbb-9145-22effa8ab009\") " Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.046862 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.047339 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.051864 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-scripts" (OuterVolumeSpecName: "scripts") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.052863 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04ee94e7-2ee0-4fbb-9145-22effa8ab009-kube-api-access-9w8g2" (OuterVolumeSpecName: "kube-api-access-9w8g2") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "kube-api-access-9w8g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.129317 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.148338 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.148380 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.148394 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.148406 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04ee94e7-2ee0-4fbb-9145-22effa8ab009-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.148447 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9w8g2\" (UniqueName: \"kubernetes.io/projected/04ee94e7-2ee0-4fbb-9145-22effa8ab009-kube-api-access-9w8g2\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.206909 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.250792 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.261262 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-config-data" (OuterVolumeSpecName: "config-data") pod "04ee94e7-2ee0-4fbb-9145-22effa8ab009" (UID: "04ee94e7-2ee0-4fbb-9145-22effa8ab009"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.353383 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ee94e7-2ee0-4fbb-9145-22effa8ab009-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.805282 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04ee94e7-2ee0-4fbb-9145-22effa8ab009","Type":"ContainerDied","Data":"889e5a96f5b19af7d87b18a5501a387a9b1e986151fa1f46fe526b4095d9a557"} Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.805361 4716 scope.go:117] "RemoveContainer" containerID="4134efb47f3e03f54ffc809ce1964c0941b0f7fccea982956d4a002a1dfc152e" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.805434 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.885297 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.929835 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.958693 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:55 crc kubenswrapper[4716]: E1209 15:33:55.961267 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="proxy-httpd" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.961637 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="proxy-httpd" Dec 09 15:33:55 crc kubenswrapper[4716]: E1209 15:33:55.961753 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-central-agent" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.961841 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-central-agent" Dec 09 15:33:55 crc kubenswrapper[4716]: E1209 15:33:55.961987 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-notification-agent" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.962069 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-notification-agent" Dec 09 15:33:55 crc kubenswrapper[4716]: E1209 15:33:55.962151 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="sg-core" Dec 09 15:33:55 crc 
kubenswrapper[4716]: I1209 15:33:55.962232 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="sg-core" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.962707 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-central-agent" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.964428 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="sg-core" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.964613 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="proxy-httpd" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.964735 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" containerName="ceilometer-notification-agent" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.968958 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.974047 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.974168 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:33:55 crc kubenswrapper[4716]: I1209 15:33:55.974293 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.076612 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.076717 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-config-data\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.076806 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-scripts\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.076888 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-run-httpd\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.076931 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.076971 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-log-httpd\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.077019 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95jhm\" (UniqueName: \"kubernetes.io/projected/c2468f42-d745-495c-b068-7ae04e50ee4a-kube-api-access-95jhm\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.178755 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.179107 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-config-data\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.179316 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-scripts\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.179476 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-run-httpd\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.179611 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.179776 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-log-httpd\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.179950 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95jhm\" (UniqueName: \"kubernetes.io/projected/c2468f42-d745-495c-b068-7ae04e50ee4a-kube-api-access-95jhm\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.180502 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-log-httpd\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 
15:33:56.181029 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-run-httpd\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.189228 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-scripts\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.192691 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.194040 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-config-data\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.219496 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.346882 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-84cd58648f-wb5xh"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.360879 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.363666 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95jhm\" (UniqueName: \"kubernetes.io/projected/c2468f42-d745-495c-b068-7ae04e50ee4a-kube-api-access-95jhm\") pod \"ceilometer-0\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.378872 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-x646c" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.379252 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.380868 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.417180 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-84cd58648f-wb5xh"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.489176 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-combined-ca-bundle\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.489286 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.489510 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data-custom\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.489562 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nst8v\" (UniqueName: \"kubernetes.io/projected/b6b6ca6f-0338-48e9-9397-61cf76346b26-kube-api-access-nst8v\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.594328 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data-custom\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.594409 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nst8v\" (UniqueName: \"kubernetes.io/projected/b6b6ca6f-0338-48e9-9397-61cf76346b26-kube-api-access-nst8v\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 
15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.594465 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-combined-ca-bundle\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.594551 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.599450 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.613078 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-combined-ca-bundle\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.616296 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.618483 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data-custom\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.636440 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nst8v\" (UniqueName: \"kubernetes.io/projected/b6b6ca6f-0338-48e9-9397-61cf76346b26-kube-api-access-nst8v\") pod \"heat-engine-84cd58648f-wb5xh\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.712922 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-h2zld"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.720346 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.749301 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-h2zld"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.806043 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7b57c6bb6b-lx7wx"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.813274 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.819973 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.842028 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.858872 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-85qhw" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="registry-server" containerID="cri-o://a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7" gracePeriod=2 Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.871363 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7b57c6bb6b-lx7wx"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.884029 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5fb7859fb4-vxfkf"] Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.885649 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.898220 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.902758 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5sbp\" (UniqueName: \"kubernetes.io/projected/e9202dd4-4544-4a66-a4da-160c8c918cac-kube-api-access-w5sbp\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.902871 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903004 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903058 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-config\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903080 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903109 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmfzb\" (UniqueName: 
\"kubernetes.io/projected/1893ef85-1e86-4247-8950-95c26985ccbc-kube-api-access-qmfzb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903246 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903324 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.903601 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data-custom\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.918719 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-combined-ca-bundle\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:56 crc kubenswrapper[4716]: I1209 15:33:56.920639 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5fb7859fb4-vxfkf"] Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.030735 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031130 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031171 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data-custom\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031204 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-combined-ca-bundle\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " 
pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031282 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-combined-ca-bundle\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031374 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5sbp\" (UniqueName: \"kubernetes.io/projected/e9202dd4-4544-4a66-a4da-160c8c918cac-kube-api-access-w5sbp\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031444 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031514 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcrhk\" (UniqueName: \"kubernetes.io/projected/0595a046-577b-4894-8f05-36663a2a4c64-kube-api-access-bcrhk\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031580 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031632 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-config\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031654 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031672 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmfzb\" (UniqueName: \"kubernetes.io/projected/1893ef85-1e86-4247-8950-95c26985ccbc-kube-api-access-qmfzb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031697 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data-custom\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: 
\"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.031731 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.042483 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data-custom\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.043495 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-combined-ca-bundle\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.043920 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.080068 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5sbp\" (UniqueName: \"kubernetes.io/projected/e9202dd4-4544-4a66-a4da-160c8c918cac-kube-api-access-w5sbp\") pod \"heat-cfnapi-7b57c6bb6b-lx7wx\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.133387 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcrhk\" (UniqueName: \"kubernetes.io/projected/0595a046-577b-4894-8f05-36663a2a4c64-kube-api-access-bcrhk\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.133564 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data-custom\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.133606 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.133744 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-combined-ca-bundle\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " 
pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.141281 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-combined-ca-bundle\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.145452 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data-custom\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.147414 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.161831 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcrhk\" (UniqueName: \"kubernetes.io/projected/0595a046-577b-4894-8f05-36663a2a4c64-kube-api-access-bcrhk\") pod \"heat-api-5fb7859fb4-vxfkf\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.183851 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.194150 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-config\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.194400 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-svc\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.194744 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-swift-storage-0\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.195012 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-nb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.198156 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-sb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: 
\"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.208584 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmfzb\" (UniqueName: \"kubernetes.io/projected/1893ef85-1e86-4247-8950-95c26985ccbc-kube-api-access-qmfzb\") pod \"dnsmasq-dns-7756b9d78c-h2zld\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.267866 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.269190 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04ee94e7-2ee0-4fbb-9145-22effa8ab009" path="/var/lib/kubelet/pods/04ee94e7-2ee0-4fbb-9145-22effa8ab009/volumes" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.381374 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.880246 4716 generic.go:334] "Generic (PLEG): container finished" podID="1075c22d-76f2-4c48-86b1-23374040e152" containerID="a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7" exitCode=0 Dec 09 15:33:57 crc kubenswrapper[4716]: I1209 15:33:57.880327 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85qhw" event={"ID":"1075c22d-76f2-4c48-86b1-23374040e152","Type":"ContainerDied","Data":"a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7"} Dec 09 15:33:58 crc kubenswrapper[4716]: I1209 15:33:58.933320 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:33:58 crc kubenswrapper[4716]: I1209 15:33:58.934172 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7ff4fc547f-x4v24" Dec 09 15:34:00 crc kubenswrapper[4716]: I1209 15:34:00.936402 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerID="edebf391edf04647c06a6b9d3f7b27f7477b570f909356bd43f842cb8ddbea37" exitCode=137 Dec 09 15:34:00 crc kubenswrapper[4716]: I1209 15:34:00.936491 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ef9ed626-a15a-44e8-9f3f-341d1a6967b7","Type":"ContainerDied","Data":"edebf391edf04647c06a6b9d3f7b27f7477b570f909356bd43f842cb8ddbea37"} Dec 09 15:34:01 crc kubenswrapper[4716]: I1209 15:34:01.247597 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.200:8776/healthcheck\": dial tcp 10.217.0.200:8776: connect: connection refused" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.160198 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-7b685d4d7d-v9gv5"] Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.164570 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.193799 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7b685d4d7d-v9gv5"] Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.270055 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-8676697bb7-dl97q"] Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.281916 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-6b76b79b89-8lgpq"] Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.283489 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.304813 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-8676697bb7-dl97q"] Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.304991 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.324345 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6b76b79b89-8lgpq"] Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.334534 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-config-data-custom\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.334663 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-combined-ca-bundle\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.334878 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r64cg\" (UniqueName: \"kubernetes.io/projected/0a60d448-8732-476b-9caa-5d47786ebcbf-kube-api-access-r64cg\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.334993 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-combined-ca-bundle\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.335098 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f85tb\" (UniqueName: \"kubernetes.io/projected/917d6c5b-6e8a-4ef6-9057-41a76202707a-kube-api-access-f85tb\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338458 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338789 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-config-data\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338848 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data-custom\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338921 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-combined-ca-bundle\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338945 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data-custom\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338971 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.338994 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5n6g\" (UniqueName: \"kubernetes.io/projected/d328b5d1-8e4d-4a23-b326-4ab0fd436482-kube-api-access-d5n6g\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.441936 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-config-data-custom\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442002 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-combined-ca-bundle\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442077 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-r64cg\" (UniqueName: \"kubernetes.io/projected/0a60d448-8732-476b-9caa-5d47786ebcbf-kube-api-access-r64cg\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442120 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-combined-ca-bundle\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442157 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f85tb\" (UniqueName: \"kubernetes.io/projected/917d6c5b-6e8a-4ef6-9057-41a76202707a-kube-api-access-f85tb\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442175 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442244 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-config-data\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442272 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data-custom\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442305 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-combined-ca-bundle\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442324 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data-custom\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442343 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.442359 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-d5n6g\" (UniqueName: \"kubernetes.io/projected/d328b5d1-8e4d-4a23-b326-4ab0fd436482-kube-api-access-d5n6g\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.453976 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-combined-ca-bundle\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.457957 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data-custom\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.477679 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r64cg\" (UniqueName: \"kubernetes.io/projected/0a60d448-8732-476b-9caa-5d47786ebcbf-kube-api-access-r64cg\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478053 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478073 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-config-data\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478208 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5n6g\" (UniqueName: \"kubernetes.io/projected/d328b5d1-8e4d-4a23-b326-4ab0fd436482-kube-api-access-d5n6g\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478334 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-combined-ca-bundle\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478806 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-combined-ca-bundle\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478921 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data-custom\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.478944 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0a60d448-8732-476b-9caa-5d47786ebcbf-config-data-custom\") pod \"heat-engine-7b685d4d7d-v9gv5\" (UID: \"0a60d448-8732-476b-9caa-5d47786ebcbf\") " pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.479256 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data\") pod \"heat-api-6b76b79b89-8lgpq\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.489036 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f85tb\" (UniqueName: \"kubernetes.io/projected/917d6c5b-6e8a-4ef6-9057-41a76202707a-kube-api-access-f85tb\") pod \"heat-cfnapi-8676697bb7-dl97q\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.511175 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.616284 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.639581 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:03 crc kubenswrapper[4716]: I1209 15:34:03.854112 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:04 crc kubenswrapper[4716]: E1209 15:34:04.278149 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7 is running failed: container process not found" containerID="a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 15:34:04 crc kubenswrapper[4716]: E1209 15:34:04.278821 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7 is running failed: container process not found" containerID="a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 15:34:04 crc kubenswrapper[4716]: E1209 15:34:04.282872 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7 is running failed: container process not found" containerID="a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 15:34:04 crc kubenswrapper[4716]: E1209 15:34:04.282966 4716 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-85qhw" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="registry-server" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.606572 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5fb7859fb4-vxfkf"] Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.623005 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7b57c6bb6b-lx7wx"] Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.657784 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5757f44d58-n4z9t"] Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.660025 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.667322 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.667532 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.676266 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-76845fb89c-tcnr6"] Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.678046 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.686718 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.686783 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.695879 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5757f44d58-n4z9t"] Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.706003 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-76845fb89c-tcnr6"] Dec 09 15:34:05 crc kubenswrapper[4716]: E1209 15:34:05.709696 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Dec 09 15:34:05 crc kubenswrapper[4716]: E1209 15:34:05.709867 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n97hfch5f9h75hch5c5h78h5c6h648h97h66h5dbh5f9h697h4h55fh67ch59dh64ch687h76hc8h9ch5dbh5fdh5f4h74h59h664h5fdh687hfbq,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_CA_CERT,Value:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j7t4q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
openstackclient_openstack(90ad0d77-c429-467b-a32d-46be1ccd1c9b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 09 15:34:05 crc kubenswrapper[4716]: E1209 15:34:05.711170 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="90ad0d77-c429-467b-a32d-46be1ccd1c9b" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.779496 4716 scope.go:117] "RemoveContainer" containerID="4ca96384d533b03a05a7480a64c81fe0f0f3605373f37166698ac991432ab754" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813443 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-combined-ca-bundle\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813505 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-public-tls-certs\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813547 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-config-data\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-config-data-custom\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813615 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-public-tls-certs\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813683 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-config-data-custom\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813816 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fbsp\" (UniqueName: \"kubernetes.io/projected/35e82a88-b5cc-4be0-a4b4-b654d67e103d-kube-api-access-8fbsp\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " 
pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813855 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-config-data\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813934 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-internal-tls-certs\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.813961 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-internal-tls-certs\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.814054 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-combined-ca-bundle\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.814079 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5jvx\" (UniqueName: \"kubernetes.io/projected/a54facb6-d928-4957-a382-5f77e827b336-kube-api-access-k5jvx\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918223 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-config-data\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918335 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-internal-tls-certs\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918360 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-internal-tls-certs\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918433 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-combined-ca-bundle\") pod \"heat-api-5757f44d58-n4z9t\" (UID: 
\"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918454 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5jvx\" (UniqueName: \"kubernetes.io/projected/a54facb6-d928-4957-a382-5f77e827b336-kube-api-access-k5jvx\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918501 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-combined-ca-bundle\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918521 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-public-tls-certs\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918549 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-config-data\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918582 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-config-data-custom\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918692 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-public-tls-certs\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918731 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-config-data-custom\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.918818 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fbsp\" (UniqueName: \"kubernetes.io/projected/35e82a88-b5cc-4be0-a4b4-b654d67e103d-kube-api-access-8fbsp\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.930222 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-config-data-custom\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") 
" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.934006 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-public-tls-certs\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.935118 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-internal-tls-certs\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.935206 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-config-data\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.935223 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-config-data-custom\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.936437 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e82a88-b5cc-4be0-a4b4-b654d67e103d-combined-ca-bundle\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.944038 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fbsp\" (UniqueName: \"kubernetes.io/projected/35e82a88-b5cc-4be0-a4b4-b654d67e103d-kube-api-access-8fbsp\") pod \"heat-cfnapi-76845fb89c-tcnr6\" (UID: \"35e82a88-b5cc-4be0-a4b4-b654d67e103d\") " pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.945925 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-public-tls-certs\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.947546 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-internal-tls-certs\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.947806 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-combined-ca-bundle\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.948151 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a54facb6-d928-4957-a382-5f77e827b336-config-data\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:05 crc kubenswrapper[4716]: I1209 15:34:05.950992 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5jvx\" (UniqueName: \"kubernetes.io/projected/a54facb6-d928-4957-a382-5f77e827b336-kube-api-access-k5jvx\") pod \"heat-api-5757f44d58-n4z9t\" (UID: \"a54facb6-d928-4957-a382-5f77e827b336\") " pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.016160 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:06 crc kubenswrapper[4716]: E1209 15:34:06.052295 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="90ad0d77-c429-467b-a32d-46be1ccd1c9b" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.113265 4716 scope.go:117] "RemoveContainer" containerID="7a59b8aa87fb92ce4a8dcc3201948518059b9a27226748d843452d660b84f71d" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.152513 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.370297 4716 scope.go:117] "RemoveContainer" containerID="a1d3b3a81b668ef739d227b362886584f776dcca3b811a4c7b5a2c259ec330b2" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.496871 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.653051 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-combined-ca-bundle\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.653588 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-logs\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.653660 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.653804 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-etc-machine-id\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.653996 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzczt\" (UniqueName: \"kubernetes.io/projected/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-kube-api-access-hzczt\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.654060 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data-custom\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.654156 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-scripts\") pod \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\" (UID: \"ef9ed626-a15a-44e8-9f3f-341d1a6967b7\") " Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.656584 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-logs" (OuterVolumeSpecName: "logs") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.658219 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.694234 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.694766 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-kube-api-access-hzczt" (OuterVolumeSpecName: "kube-api-access-hzczt") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "kube-api-access-hzczt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.695286 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-scripts" (OuterVolumeSpecName: "scripts") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.715893 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.758949 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.760536 4716 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.760650 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzczt\" (UniqueName: \"kubernetes.io/projected/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-kube-api-access-hzczt\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.760740 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.760819 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.760888 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.814863 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data" (OuterVolumeSpecName: "config-data") pod "ef9ed626-a15a-44e8-9f3f-341d1a6967b7" (UID: "ef9ed626-a15a-44e8-9f3f-341d1a6967b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.870023 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef9ed626-a15a-44e8-9f3f-341d1a6967b7-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:06 crc kubenswrapper[4716]: I1209 15:34:06.904091 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.073666 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ef9ed626-a15a-44e8-9f3f-341d1a6967b7","Type":"ContainerDied","Data":"556862d1efec32c631349562fb72d8444e93c1d3f11e3aad4681d0cb551a64b5"} Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.073904 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.074003 4716 scope.go:117] "RemoveContainer" containerID="edebf391edf04647c06a6b9d3f7b27f7477b570f909356bd43f842cb8ddbea37" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.075493 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-catalog-content\") pod \"1075c22d-76f2-4c48-86b1-23374040e152\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.075590 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-utilities\") pod \"1075c22d-76f2-4c48-86b1-23374040e152\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.075615 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhqm8\" (UniqueName: \"kubernetes.io/projected/1075c22d-76f2-4c48-86b1-23374040e152-kube-api-access-lhqm8\") pod \"1075c22d-76f2-4c48-86b1-23374040e152\" (UID: \"1075c22d-76f2-4c48-86b1-23374040e152\") " Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.077488 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-utilities" (OuterVolumeSpecName: "utilities") pod "1075c22d-76f2-4c48-86b1-23374040e152" (UID: "1075c22d-76f2-4c48-86b1-23374040e152"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.080105 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-85qhw" event={"ID":"1075c22d-76f2-4c48-86b1-23374040e152","Type":"ContainerDied","Data":"a5ed2bf6049f87bb749ae145d30495ab36e33d5185e584e9d06b05dcfd7f53f6"} Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.080296 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-85qhw" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.080718 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1075c22d-76f2-4c48-86b1-23374040e152-kube-api-access-lhqm8" (OuterVolumeSpecName: "kube-api-access-lhqm8") pod "1075c22d-76f2-4c48-86b1-23374040e152" (UID: "1075c22d-76f2-4c48-86b1-23374040e152"). InnerVolumeSpecName "kube-api-access-lhqm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.116427 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1075c22d-76f2-4c48-86b1-23374040e152" (UID: "1075c22d-76f2-4c48-86b1-23374040e152"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.153685 4716 scope.go:117] "RemoveContainer" containerID="4f2aeda864489523c86585554b9d8998a96efd0cc74ae0b70e9cdef158de2be2" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.194992 4716 scope.go:117] "RemoveContainer" containerID="a692febd399ac1eae8f990122567fc7d3c009d489f4f505a13d1eac063480af7" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.195751 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.195785 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1075c22d-76f2-4c48-86b1-23374040e152-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.195798 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhqm8\" (UniqueName: \"kubernetes.io/projected/1075c22d-76f2-4c48-86b1-23374040e152-kube-api-access-lhqm8\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.199535 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250053 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250090 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250289 4716 scope.go:117] "RemoveContainer" containerID="222741b4681273d5f47996bdc2d3441de8db0ad698b35edc2337fcba1fd7dddd" Dec 09 15:34:07 crc kubenswrapper[4716]: E1209 15:34:07.250510 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="registry-server" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250524 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="registry-server" Dec 09 15:34:07 crc kubenswrapper[4716]: E1209 15:34:07.250536 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="extract-content" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250542 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1075c22d-76f2-4c48-86b1-23374040e152" 
containerName="extract-content" Dec 09 15:34:07 crc kubenswrapper[4716]: E1209 15:34:07.250559 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250565 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api" Dec 09 15:34:07 crc kubenswrapper[4716]: E1209 15:34:07.250582 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="extract-utilities" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250589 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="extract-utilities" Dec 09 15:34:07 crc kubenswrapper[4716]: E1209 15:34:07.250658 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api-log" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250671 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api-log" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250910 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250924 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api-log" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.250938 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="1075c22d-76f2-4c48-86b1-23374040e152" containerName="registry-server" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.252243 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.255575 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.255797 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.256598 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.263913 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.299396 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-config-data\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.299473 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-config-data-custom\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.299555 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.299612 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.299857 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebb94c0f-cef2-4960-8b18-c073029c813c-logs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.299995 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl7nc\" (UniqueName: \"kubernetes.io/projected/ebb94c0f-cef2-4960-8b18-c073029c813c-kube-api-access-jl7nc\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.300091 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.300462 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/ebb94c0f-cef2-4960-8b18-c073029c813c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.300500 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-scripts\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.305033 4716 scope.go:117] "RemoveContainer" containerID="b0a371fe82d3346a610e18cf8290d8c22464d5461287bf72afad2223124c62dd" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.372694 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5fb7859fb4-vxfkf"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.389145 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-84cd58648f-wb5xh"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.401997 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-8676697bb7-dl97q"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402045 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402146 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebb94c0f-cef2-4960-8b18-c073029c813c-logs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402199 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl7nc\" (UniqueName: \"kubernetes.io/projected/ebb94c0f-cef2-4960-8b18-c073029c813c-kube-api-access-jl7nc\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402232 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402271 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ebb94c0f-cef2-4960-8b18-c073029c813c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402299 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-scripts\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402332 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-config-data\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402372 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-config-data-custom\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.402420 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.403107 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebb94c0f-cef2-4960-8b18-c073029c813c-logs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.403545 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ebb94c0f-cef2-4960-8b18-c073029c813c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.406799 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.407337 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.412277 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-config-data-custom\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.414415 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.416975 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-config-data\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.417117 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ebb94c0f-cef2-4960-8b18-c073029c813c-scripts\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.429301 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl7nc\" (UniqueName: \"kubernetes.io/projected/ebb94c0f-cef2-4960-8b18-c073029c813c-kube-api-access-jl7nc\") pod \"cinder-api-0\" (UID: \"ebb94c0f-cef2-4960-8b18-c073029c813c\") " pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.433396 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-85qhw"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.446082 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-85qhw"] Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.637247 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 09 15:34:07 crc kubenswrapper[4716]: I1209 15:34:07.809943 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7b57c6bb6b-lx7wx"] Dec 09 15:34:07 crc kubenswrapper[4716]: W1209 15:34:07.995916 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2468f42_d745_495c_b068_7ae04e50ee4a.slice/crio-2d88ca049cf872ca4d9673e3d13c6955529af8d3ed52913c4a49e3170645097a WatchSource:0}: Error finding container 2d88ca049cf872ca4d9673e3d13c6955529af8d3ed52913c4a49e3170645097a: Status 404 returned error can't find the container with id 2d88ca049cf872ca4d9673e3d13c6955529af8d3ed52913c4a49e3170645097a Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.027387 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:08 crc kubenswrapper[4716]: W1209 15:34:08.041585 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35e82a88_b5cc_4be0_a4b4_b654d67e103d.slice/crio-abadde6e3d5061e245c104cf741952107f76814a7d7e601393ff438cf6230209 WatchSource:0}: Error finding container abadde6e3d5061e245c104cf741952107f76814a7d7e601393ff438cf6230209: Status 404 returned error can't find the container with id abadde6e3d5061e245c104cf741952107f76814a7d7e601393ff438cf6230209 Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.048733 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5757f44d58-n4z9t"] Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.065282 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6b76b79b89-8lgpq"] Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.080531 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-76845fb89c-tcnr6"] Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.095260 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7b685d4d7d-v9gv5"] Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.110525 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5757f44d58-n4z9t" event={"ID":"a54facb6-d928-4957-a382-5f77e827b336","Type":"ContainerStarted","Data":"3ea4161f1348e95c51a973c020cacf5b121f1f80af202f7bebd7b445cc17660f"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.111759 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-api-5fb7859fb4-vxfkf" event={"ID":"0595a046-577b-4894-8f05-36663a2a4c64","Type":"ContainerStarted","Data":"bbe50d5479f29167f4e7bd2e678f3c4b02580d44a2d993b1c3f4fa179dc7491c"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.112922 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-h2zld"] Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.117832 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-8676697bb7-dl97q" event={"ID":"917d6c5b-6e8a-4ef6-9057-41a76202707a","Type":"ContainerStarted","Data":"0f2135e081445f9f4af58b189452795791ebff94872285ca28d32afc58a5e53b"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.121423 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" event={"ID":"1893ef85-1e86-4247-8950-95c26985ccbc","Type":"ContainerStarted","Data":"c77d9042923476d03a71e7c17b81298f7c00d69b329d62309b7cb92b8516ba8a"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.145385 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" event={"ID":"35e82a88-b5cc-4be0-a4b4-b654d67e103d","Type":"ContainerStarted","Data":"abadde6e3d5061e245c104cf741952107f76814a7d7e601393ff438cf6230209"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.159877 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" event={"ID":"e9202dd4-4544-4a66-a4da-160c8c918cac","Type":"ContainerStarted","Data":"22dd3134aac08bf208bf6069fa863bd25e854a6591052f1cf9f1219b933dadb8"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.165769 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6b76b79b89-8lgpq" event={"ID":"d328b5d1-8e4d-4a23-b326-4ab0fd436482","Type":"ContainerStarted","Data":"2e51501a899a07970fa118b63efb401630f99d088c9168f56b561332af17f1ca"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.170128 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerStarted","Data":"2d88ca049cf872ca4d9673e3d13c6955529af8d3ed52913c4a49e3170645097a"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.174232 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-84cd58648f-wb5xh" event={"ID":"b6b6ca6f-0338-48e9-9397-61cf76346b26","Type":"ContainerStarted","Data":"71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.174295 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-84cd58648f-wb5xh" event={"ID":"b6b6ca6f-0338-48e9-9397-61cf76346b26","Type":"ContainerStarted","Data":"4ac307b276ca1515e9e9ac091c095779101555a0cba2ae6cc95f769bbfa1e629"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.175838 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.178333 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7b685d4d7d-v9gv5" event={"ID":"0a60d448-8732-476b-9caa-5d47786ebcbf","Type":"ContainerStarted","Data":"5f0e8bc318937c91e9bd3dd0069e49b43a4290adb7bc0594bf5f9d13e6a08909"} Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.197663 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-84cd58648f-wb5xh" 
podStartSLOduration=12.197611801 podStartE2EDuration="12.197611801s" podCreationTimestamp="2025-12-09 15:33:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:08.196977243 +0000 UTC m=+1535.351721231" watchObservedRunningTime="2025-12-09 15:34:08.197611801 +0000 UTC m=+1535.352355789" Dec 09 15:34:08 crc kubenswrapper[4716]: I1209 15:34:08.337755 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.264274 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-7b685d4d7d-v9gv5" podStartSLOduration=6.264229701 podStartE2EDuration="6.264229701s" podCreationTimestamp="2025-12-09 15:34:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:09.24820473 +0000 UTC m=+1536.402948718" watchObservedRunningTime="2025-12-09 15:34:09.264229701 +0000 UTC m=+1536.418973689" Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.273399 4716 generic.go:334] "Generic (PLEG): container finished" podID="1893ef85-1e86-4247-8950-95c26985ccbc" containerID="eb03d62b4505bdd1ebd857e4a46c24056f996ead1be338eaf9cad1125c38117e" exitCode=0 Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.286891 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1075c22d-76f2-4c48-86b1-23374040e152" path="/var/lib/kubelet/pods/1075c22d-76f2-4c48-86b1-23374040e152/volumes" Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.288530 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" path="/var/lib/kubelet/pods/ef9ed626-a15a-44e8-9f3f-341d1a6967b7/volumes" Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.289576 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerStarted","Data":"71a6de0e1d0befadb78dd44bcdd9582815fde4416a09dc9cc5bd6d85eb645fe1"} Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.289949 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7b685d4d7d-v9gv5" event={"ID":"0a60d448-8732-476b-9caa-5d47786ebcbf","Type":"ContainerStarted","Data":"982184fc3981a07de75731fb9466b4ff0446fc7d6a063d7cc6807ba1d5d9e646"} Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.290049 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.290095 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ebb94c0f-cef2-4960-8b18-c073029c813c","Type":"ContainerStarted","Data":"f229b57b939821073f95d4facad8a91d05e0b8b35e84b0961c67065ac7d18d81"} Dec 09 15:34:09 crc kubenswrapper[4716]: I1209 15:34:09.290113 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" event={"ID":"1893ef85-1e86-4247-8950-95c26985ccbc","Type":"ContainerDied","Data":"eb03d62b4505bdd1ebd857e4a46c24056f996ead1be338eaf9cad1125c38117e"} Dec 09 15:34:10 crc kubenswrapper[4716]: I1209 15:34:10.289330 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" 
event={"ID":"1893ef85-1e86-4247-8950-95c26985ccbc","Type":"ContainerStarted","Data":"15030166a469f0c01e34beed8ba32884e53efc7db49d2f1509100c1c5b992fa3"} Dec 09 15:34:10 crc kubenswrapper[4716]: I1209 15:34:10.289594 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:34:10 crc kubenswrapper[4716]: I1209 15:34:10.294230 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ebb94c0f-cef2-4960-8b18-c073029c813c","Type":"ContainerStarted","Data":"1f95b2526ba76933cca29cdc3d7f441c590f8633140546db6780b5c834f39514"} Dec 09 15:34:10 crc kubenswrapper[4716]: I1209 15:34:10.332727 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" podStartSLOduration=14.332702546 podStartE2EDuration="14.332702546s" podCreationTimestamp="2025-12-09 15:33:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:10.315176281 +0000 UTC m=+1537.469920269" watchObservedRunningTime="2025-12-09 15:34:10.332702546 +0000 UTC m=+1537.487446584" Dec 09 15:34:11 crc kubenswrapper[4716]: I1209 15:34:11.246985 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="ef9ed626-a15a-44e8-9f3f-341d1a6967b7" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.200:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:34:12 crc kubenswrapper[4716]: I1209 15:34:12.369498 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerStarted","Data":"91ea182ae177ab8f6bb25df350936378b78bb701d228df700638dd1aab931a37"} Dec 09 15:34:12 crc kubenswrapper[4716]: I1209 15:34:12.373216 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" event={"ID":"35e82a88-b5cc-4be0-a4b4-b654d67e103d","Type":"ContainerStarted","Data":"85b56f4f0e6d0c4ed869ac2feca5f57f3f2956382fe17db6cbc67597643242d9"} Dec 09 15:34:12 crc kubenswrapper[4716]: I1209 15:34:12.381118 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6b76b79b89-8lgpq" event={"ID":"d328b5d1-8e4d-4a23-b326-4ab0fd436482","Type":"ContainerStarted","Data":"bc26dfa56af572f72b0c0ea866ad1ee9558eee3023465946645ebd7c3f2fcf58"} Dec 09 15:34:12 crc kubenswrapper[4716]: I1209 15:34:12.381817 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:12 crc kubenswrapper[4716]: I1209 15:34:12.403827 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-6b76b79b89-8lgpq" podStartSLOduration=6.438951287 podStartE2EDuration="9.403800868s" podCreationTimestamp="2025-12-09 15:34:03 +0000 UTC" firstStartedPulling="2025-12-09 15:34:08.031566815 +0000 UTC m=+1535.186310803" lastFinishedPulling="2025-12-09 15:34:10.996416386 +0000 UTC m=+1538.151160384" observedRunningTime="2025-12-09 15:34:12.401051898 +0000 UTC m=+1539.555795886" watchObservedRunningTime="2025-12-09 15:34:12.403800868 +0000 UTC m=+1539.558544856" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.419269 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-8676697bb7-dl97q" 
event={"ID":"917d6c5b-6e8a-4ef6-9057-41a76202707a","Type":"ContainerStarted","Data":"fe4a4b467b04bf26297e66335868b8e066fb4e55c6fadfc73f4c2224761d420c"} Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.420887 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.423397 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerStarted","Data":"515154073b07651cc65c5bc467c01016e8b1d94653fd2b5860e4c584f5079e2c"} Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.424545 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5757f44d58-n4z9t" event={"ID":"a54facb6-d928-4957-a382-5f77e827b336","Type":"ContainerStarted","Data":"5d6cb9a0c94acf5980e4639dc00f2a1244af1bef60c0cf08aec92aec21723f0b"} Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.432777 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.453823 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5fb7859fb4-vxfkf" event={"ID":"0595a046-577b-4894-8f05-36663a2a4c64","Type":"ContainerStarted","Data":"d405f9f61142ef11fe6ef4c407c59f7a554672aa4c5db1d5d26f3d6ad9432c57"} Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.454129 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-5fb7859fb4-vxfkf" podUID="0595a046-577b-4894-8f05-36663a2a4c64" containerName="heat-api" containerID="cri-o://d405f9f61142ef11fe6ef4c407c59f7a554672aa4c5db1d5d26f3d6ad9432c57" gracePeriod=60 Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.454575 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.502929 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ebb94c0f-cef2-4960-8b18-c073029c813c","Type":"ContainerStarted","Data":"85e83b438f74a77830115484f39042cb6b77ffd553c4b03b127242e9d79e9eda"} Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.503248 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.505979 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" podUID="e9202dd4-4544-4a66-a4da-160c8c918cac" containerName="heat-cfnapi" containerID="cri-o://8797d8e9a9e05c2eb7977c1ca6a59d8ca91220baf364726a784891636e0d23fe" gracePeriod=60 Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.506202 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" event={"ID":"e9202dd4-4544-4a66-a4da-160c8c918cac","Type":"ContainerStarted","Data":"8797d8e9a9e05c2eb7977c1ca6a59d8ca91220baf364726a784891636e0d23fe"} Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.506408 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.506442 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.570817 4716 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/heat-api-5fb7859fb4-vxfkf" podStartSLOduration=13.918489038 podStartE2EDuration="17.570790862s" podCreationTimestamp="2025-12-09 15:33:56 +0000 UTC" firstStartedPulling="2025-12-09 15:34:07.374064755 +0000 UTC m=+1534.528808743" lastFinishedPulling="2025-12-09 15:34:11.026366589 +0000 UTC m=+1538.181110567" observedRunningTime="2025-12-09 15:34:13.489146679 +0000 UTC m=+1540.643890667" watchObservedRunningTime="2025-12-09 15:34:13.570790862 +0000 UTC m=+1540.725534850" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.616061 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5757f44d58-n4z9t" podStartSLOduration=5.653907264 podStartE2EDuration="8.616030036s" podCreationTimestamp="2025-12-09 15:34:05 +0000 UTC" firstStartedPulling="2025-12-09 15:34:08.034310114 +0000 UTC m=+1535.189054102" lastFinishedPulling="2025-12-09 15:34:10.996432886 +0000 UTC m=+1538.151176874" observedRunningTime="2025-12-09 15:34:13.520274376 +0000 UTC m=+1540.675018374" watchObservedRunningTime="2025-12-09 15:34:13.616030036 +0000 UTC m=+1540.770774034" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.647853 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-8676697bb7-dl97q" podStartSLOduration=6.987696733 podStartE2EDuration="10.639563524s" podCreationTimestamp="2025-12-09 15:34:03 +0000 UTC" firstStartedPulling="2025-12-09 15:34:07.377121043 +0000 UTC m=+1534.531865031" lastFinishedPulling="2025-12-09 15:34:11.028987844 +0000 UTC m=+1538.183731822" observedRunningTime="2025-12-09 15:34:13.550035334 +0000 UTC m=+1540.704779322" watchObservedRunningTime="2025-12-09 15:34:13.639563524 +0000 UTC m=+1540.794307502" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.670857 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.670827055 podStartE2EDuration="6.670827055s" podCreationTimestamp="2025-12-09 15:34:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:13.585158696 +0000 UTC m=+1540.739902684" watchObservedRunningTime="2025-12-09 15:34:13.670827055 +0000 UTC m=+1540.825571043" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.674427 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" podStartSLOduration=5.705635854 podStartE2EDuration="8.674401778s" podCreationTimestamp="2025-12-09 15:34:05 +0000 UTC" firstStartedPulling="2025-12-09 15:34:08.056966567 +0000 UTC m=+1535.211710555" lastFinishedPulling="2025-12-09 15:34:11.025732491 +0000 UTC m=+1538.180476479" observedRunningTime="2025-12-09 15:34:13.621074481 +0000 UTC m=+1540.775818469" watchObservedRunningTime="2025-12-09 15:34:13.674401778 +0000 UTC m=+1540.829145766" Dec 09 15:34:13 crc kubenswrapper[4716]: I1209 15:34:13.718423 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" podStartSLOduration=14.517679027 podStartE2EDuration="17.718392106s" podCreationTimestamp="2025-12-09 15:33:56 +0000 UTC" firstStartedPulling="2025-12-09 15:34:07.831928461 +0000 UTC m=+1534.986672449" lastFinishedPulling="2025-12-09 15:34:11.03264154 +0000 UTC m=+1538.187385528" observedRunningTime="2025-12-09 15:34:13.649410768 +0000 UTC m=+1540.804154756" watchObservedRunningTime="2025-12-09 15:34:13.718392106 +0000 UTC 
m=+1540.873136094" Dec 09 15:34:14 crc kubenswrapper[4716]: I1209 15:34:14.517911 4716 generic.go:334] "Generic (PLEG): container finished" podID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerID="bc26dfa56af572f72b0c0ea866ad1ee9558eee3023465946645ebd7c3f2fcf58" exitCode=1 Dec 09 15:34:14 crc kubenswrapper[4716]: I1209 15:34:14.517973 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6b76b79b89-8lgpq" event={"ID":"d328b5d1-8e4d-4a23-b326-4ab0fd436482","Type":"ContainerDied","Data":"bc26dfa56af572f72b0c0ea866ad1ee9558eee3023465946645ebd7c3f2fcf58"} Dec 09 15:34:14 crc kubenswrapper[4716]: I1209 15:34:14.518749 4716 scope.go:117] "RemoveContainer" containerID="bc26dfa56af572f72b0c0ea866ad1ee9558eee3023465946645ebd7c3f2fcf58" Dec 09 15:34:14 crc kubenswrapper[4716]: I1209 15:34:14.525368 4716 generic.go:334] "Generic (PLEG): container finished" podID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerID="fe4a4b467b04bf26297e66335868b8e066fb4e55c6fadfc73f4c2224761d420c" exitCode=1 Dec 09 15:34:14 crc kubenswrapper[4716]: I1209 15:34:14.525427 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-8676697bb7-dl97q" event={"ID":"917d6c5b-6e8a-4ef6-9057-41a76202707a","Type":"ContainerDied","Data":"fe4a4b467b04bf26297e66335868b8e066fb4e55c6fadfc73f4c2224761d420c"} Dec 09 15:34:14 crc kubenswrapper[4716]: I1209 15:34:14.527083 4716 scope.go:117] "RemoveContainer" containerID="fe4a4b467b04bf26297e66335868b8e066fb4e55c6fadfc73f4c2224761d420c" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.558581 4716 generic.go:334] "Generic (PLEG): container finished" podID="e9202dd4-4544-4a66-a4da-160c8c918cac" containerID="8797d8e9a9e05c2eb7977c1ca6a59d8ca91220baf364726a784891636e0d23fe" exitCode=0 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.558662 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" event={"ID":"e9202dd4-4544-4a66-a4da-160c8c918cac","Type":"ContainerDied","Data":"8797d8e9a9e05c2eb7977c1ca6a59d8ca91220baf364726a784891636e0d23fe"} Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.564534 4716 generic.go:334] "Generic (PLEG): container finished" podID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerID="298e67967e2357f11792268f36281a94f3e3f3ed23603d05f717188970b1c1ce" exitCode=1 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.565478 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6b76b79b89-8lgpq" event={"ID":"d328b5d1-8e4d-4a23-b326-4ab0fd436482","Type":"ContainerDied","Data":"298e67967e2357f11792268f36281a94f3e3f3ed23603d05f717188970b1c1ce"} Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.565810 4716 scope.go:117] "RemoveContainer" containerID="bc26dfa56af572f72b0c0ea866ad1ee9558eee3023465946645ebd7c3f2fcf58" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.570092 4716 scope.go:117] "RemoveContainer" containerID="298e67967e2357f11792268f36281a94f3e3f3ed23603d05f717188970b1c1ce" Dec 09 15:34:15 crc kubenswrapper[4716]: E1209 15:34:15.573139 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-6b76b79b89-8lgpq_openstack(d328b5d1-8e4d-4a23-b326-4ab0fd436482)\"" pod="openstack/heat-api-6b76b79b89-8lgpq" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.580440 4716 generic.go:334] "Generic (PLEG): container finished" 
podID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerID="606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170" exitCode=1 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.580513 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-8676697bb7-dl97q" event={"ID":"917d6c5b-6e8a-4ef6-9057-41a76202707a","Type":"ContainerDied","Data":"606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170"} Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.581327 4716 scope.go:117] "RemoveContainer" containerID="606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170" Dec 09 15:34:15 crc kubenswrapper[4716]: E1209 15:34:15.581608 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-8676697bb7-dl97q_openstack(917d6c5b-6e8a-4ef6-9057-41a76202707a)\"" pod="openstack/heat-cfnapi-8676697bb7-dl97q" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.597796 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerStarted","Data":"87514f11222283636c35f18fdc67223266921398d321468f28ae5121adc39bed"} Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.597951 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.598123 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-central-agent" containerID="cri-o://71a6de0e1d0befadb78dd44bcdd9582815fde4416a09dc9cc5bd6d85eb645fe1" gracePeriod=30 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.598380 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.598420 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="proxy-httpd" containerID="cri-o://87514f11222283636c35f18fdc67223266921398d321468f28ae5121adc39bed" gracePeriod=30 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.598488 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="sg-core" containerID="cri-o://515154073b07651cc65c5bc467c01016e8b1d94653fd2b5860e4c584f5079e2c" gracePeriod=30 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.598525 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-notification-agent" containerID="cri-o://91ea182ae177ab8f6bb25df350936378b78bb701d228df700638dd1aab931a37" gracePeriod=30 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.643259 4716 generic.go:334] "Generic (PLEG): container finished" podID="0595a046-577b-4894-8f05-36663a2a4c64" containerID="d405f9f61142ef11fe6ef4c407c59f7a554672aa4c5db1d5d26f3d6ad9432c57" exitCode=0 Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.643326 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5fb7859fb4-vxfkf" 
event={"ID":"0595a046-577b-4894-8f05-36663a2a4c64","Type":"ContainerDied","Data":"d405f9f61142ef11fe6ef4c407c59f7a554672aa4c5db1d5d26f3d6ad9432c57"} Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.659329 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=13.542046337 podStartE2EDuration="20.659301586s" podCreationTimestamp="2025-12-09 15:33:55 +0000 UTC" firstStartedPulling="2025-12-09 15:34:08.017068127 +0000 UTC m=+1535.171812115" lastFinishedPulling="2025-12-09 15:34:15.134323386 +0000 UTC m=+1542.289067364" observedRunningTime="2025-12-09 15:34:15.632632788 +0000 UTC m=+1542.787376776" watchObservedRunningTime="2025-12-09 15:34:15.659301586 +0000 UTC m=+1542.814045574" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.678428 4716 scope.go:117] "RemoveContainer" containerID="fe4a4b467b04bf26297e66335868b8e066fb4e55c6fadfc73f4c2224761d420c" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.706112 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data\") pod \"e9202dd4-4544-4a66-a4da-160c8c918cac\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.706568 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data-custom\") pod \"e9202dd4-4544-4a66-a4da-160c8c918cac\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.706739 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5sbp\" (UniqueName: \"kubernetes.io/projected/e9202dd4-4544-4a66-a4da-160c8c918cac-kube-api-access-w5sbp\") pod \"e9202dd4-4544-4a66-a4da-160c8c918cac\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.706857 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-combined-ca-bundle\") pod \"e9202dd4-4544-4a66-a4da-160c8c918cac\" (UID: \"e9202dd4-4544-4a66-a4da-160c8c918cac\") " Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.719853 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e9202dd4-4544-4a66-a4da-160c8c918cac" (UID: "e9202dd4-4544-4a66-a4da-160c8c918cac"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.729541 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9202dd4-4544-4a66-a4da-160c8c918cac-kube-api-access-w5sbp" (OuterVolumeSpecName: "kube-api-access-w5sbp") pod "e9202dd4-4544-4a66-a4da-160c8c918cac" (UID: "e9202dd4-4544-4a66-a4da-160c8c918cac"). InnerVolumeSpecName "kube-api-access-w5sbp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.781960 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9202dd4-4544-4a66-a4da-160c8c918cac" (UID: "e9202dd4-4544-4a66-a4da-160c8c918cac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.812616 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.812678 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5sbp\" (UniqueName: \"kubernetes.io/projected/e9202dd4-4544-4a66-a4da-160c8c918cac-kube-api-access-w5sbp\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.812692 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.831804 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data" (OuterVolumeSpecName: "config-data") pod "e9202dd4-4544-4a66-a4da-160c8c918cac" (UID: "e9202dd4-4544-4a66-a4da-160c8c918cac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:15 crc kubenswrapper[4716]: I1209 15:34:15.916750 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9202dd4-4544-4a66-a4da-160c8c918cac-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.208755 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.326790 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-combined-ca-bundle\") pod \"0595a046-577b-4894-8f05-36663a2a4c64\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.326912 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data-custom\") pod \"0595a046-577b-4894-8f05-36663a2a4c64\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.326935 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcrhk\" (UniqueName: \"kubernetes.io/projected/0595a046-577b-4894-8f05-36663a2a4c64-kube-api-access-bcrhk\") pod \"0595a046-577b-4894-8f05-36663a2a4c64\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.327021 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data\") pod \"0595a046-577b-4894-8f05-36663a2a4c64\" (UID: \"0595a046-577b-4894-8f05-36663a2a4c64\") " Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.336107 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0595a046-577b-4894-8f05-36663a2a4c64" (UID: "0595a046-577b-4894-8f05-36663a2a4c64"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.340060 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0595a046-577b-4894-8f05-36663a2a4c64-kube-api-access-bcrhk" (OuterVolumeSpecName: "kube-api-access-bcrhk") pod "0595a046-577b-4894-8f05-36663a2a4c64" (UID: "0595a046-577b-4894-8f05-36663a2a4c64"). InnerVolumeSpecName "kube-api-access-bcrhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.375873 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0595a046-577b-4894-8f05-36663a2a4c64" (UID: "0595a046-577b-4894-8f05-36663a2a4c64"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.401428 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data" (OuterVolumeSpecName: "config-data") pod "0595a046-577b-4894-8f05-36663a2a4c64" (UID: "0595a046-577b-4894-8f05-36663a2a4c64"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.432510 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.432558 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.432576 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcrhk\" (UniqueName: \"kubernetes.io/projected/0595a046-577b-4894-8f05-36663a2a4c64-kube-api-access-bcrhk\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.432590 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0595a046-577b-4894-8f05-36663a2a4c64-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.660165 4716 scope.go:117] "RemoveContainer" containerID="606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170" Dec 09 15:34:16 crc kubenswrapper[4716]: E1209 15:34:16.660837 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-8676697bb7-dl97q_openstack(917d6c5b-6e8a-4ef6-9057-41a76202707a)\"" pod="openstack/heat-cfnapi-8676697bb7-dl97q" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.662511 4716 generic.go:334] "Generic (PLEG): container finished" podID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerID="87514f11222283636c35f18fdc67223266921398d321468f28ae5121adc39bed" exitCode=0 Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.662546 4716 generic.go:334] "Generic (PLEG): container finished" podID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerID="515154073b07651cc65c5bc467c01016e8b1d94653fd2b5860e4c584f5079e2c" exitCode=2 Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.662554 4716 generic.go:334] "Generic (PLEG): container finished" podID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerID="91ea182ae177ab8f6bb25df350936378b78bb701d228df700638dd1aab931a37" exitCode=0 Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.662597 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerDied","Data":"87514f11222283636c35f18fdc67223266921398d321468f28ae5121adc39bed"} Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.662648 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerDied","Data":"515154073b07651cc65c5bc467c01016e8b1d94653fd2b5860e4c584f5079e2c"} Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.662661 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerDied","Data":"91ea182ae177ab8f6bb25df350936378b78bb701d228df700638dd1aab931a37"} Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.667464 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-api-5fb7859fb4-vxfkf" event={"ID":"0595a046-577b-4894-8f05-36663a2a4c64","Type":"ContainerDied","Data":"bbe50d5479f29167f4e7bd2e678f3c4b02580d44a2d993b1c3f4fa179dc7491c"} Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.667513 4716 scope.go:117] "RemoveContainer" containerID="d405f9f61142ef11fe6ef4c407c59f7a554672aa4c5db1d5d26f3d6ad9432c57" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.667647 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5fb7859fb4-vxfkf" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.674015 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" event={"ID":"e9202dd4-4544-4a66-a4da-160c8c918cac","Type":"ContainerDied","Data":"22dd3134aac08bf208bf6069fa863bd25e854a6591052f1cf9f1219b933dadb8"} Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.674067 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7b57c6bb6b-lx7wx" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.723830 4716 scope.go:117] "RemoveContainer" containerID="8797d8e9a9e05c2eb7977c1ca6a59d8ca91220baf364726a784891636e0d23fe" Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.806328 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7b57c6bb6b-lx7wx"] Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.820879 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7b57c6bb6b-lx7wx"] Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.832842 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5fb7859fb4-vxfkf"] Dec 09 15:34:16 crc kubenswrapper[4716]: I1209 15:34:16.846712 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5fb7859fb4-vxfkf"] Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.231899 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0595a046-577b-4894-8f05-36663a2a4c64" path="/var/lib/kubelet/pods/0595a046-577b-4894-8f05-36663a2a4c64/volumes" Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.232859 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9202dd4-4544-4a66-a4da-160c8c918cac" path="/var/lib/kubelet/pods/e9202dd4-4544-4a66-a4da-160c8c918cac/volumes" Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.386849 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.474209 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-tm6xz"] Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.474865 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerName="dnsmasq-dns" containerID="cri-o://2ff5d67d362b7047d8a1a4302e41343f2ffeef6fa5dc33b39b0c6a0385d5cca2" gracePeriod=10 Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.726741 4716 generic.go:334] "Generic (PLEG): container finished" podID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerID="2ff5d67d362b7047d8a1a4302e41343f2ffeef6fa5dc33b39b0c6a0385d5cca2" exitCode=0 Dec 09 15:34:17 crc kubenswrapper[4716]: I1209 15:34:17.726822 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" 
event={"ID":"89424e07-4a8d-47e7-b0f0-4d5161773f94","Type":"ContainerDied","Data":"2ff5d67d362b7047d8a1a4302e41343f2ffeef6fa5dc33b39b0c6a0385d5cca2"} Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.227329 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.392728 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-swift-storage-0\") pod \"89424e07-4a8d-47e7-b0f0-4d5161773f94\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.392798 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-nb\") pod \"89424e07-4a8d-47e7-b0f0-4d5161773f94\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.392871 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-svc\") pod \"89424e07-4a8d-47e7-b0f0-4d5161773f94\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.392919 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-config\") pod \"89424e07-4a8d-47e7-b0f0-4d5161773f94\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.392958 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-sb\") pod \"89424e07-4a8d-47e7-b0f0-4d5161773f94\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.393068 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g76tp\" (UniqueName: \"kubernetes.io/projected/89424e07-4a8d-47e7-b0f0-4d5161773f94-kube-api-access-g76tp\") pod \"89424e07-4a8d-47e7-b0f0-4d5161773f94\" (UID: \"89424e07-4a8d-47e7-b0f0-4d5161773f94\") " Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.407974 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89424e07-4a8d-47e7-b0f0-4d5161773f94-kube-api-access-g76tp" (OuterVolumeSpecName: "kube-api-access-g76tp") pod "89424e07-4a8d-47e7-b0f0-4d5161773f94" (UID: "89424e07-4a8d-47e7-b0f0-4d5161773f94"). InnerVolumeSpecName "kube-api-access-g76tp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.485359 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "89424e07-4a8d-47e7-b0f0-4d5161773f94" (UID: "89424e07-4a8d-47e7-b0f0-4d5161773f94"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.496544 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.496578 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g76tp\" (UniqueName: \"kubernetes.io/projected/89424e07-4a8d-47e7-b0f0-4d5161773f94-kube-api-access-g76tp\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.497988 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-config" (OuterVolumeSpecName: "config") pod "89424e07-4a8d-47e7-b0f0-4d5161773f94" (UID: "89424e07-4a8d-47e7-b0f0-4d5161773f94"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.502358 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "89424e07-4a8d-47e7-b0f0-4d5161773f94" (UID: "89424e07-4a8d-47e7-b0f0-4d5161773f94"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.507561 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "89424e07-4a8d-47e7-b0f0-4d5161773f94" (UID: "89424e07-4a8d-47e7-b0f0-4d5161773f94"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.531037 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "89424e07-4a8d-47e7-b0f0-4d5161773f94" (UID: "89424e07-4a8d-47e7-b0f0-4d5161773f94"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.599341 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.599391 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.599406 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.599415 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89424e07-4a8d-47e7-b0f0-4d5161773f94-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.617474 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.617638 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.618084 4716 scope.go:117] "RemoveContainer" containerID="606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170" Dec 09 15:34:18 crc kubenswrapper[4716]: E1209 15:34:18.618433 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-8676697bb7-dl97q_openstack(917d6c5b-6e8a-4ef6-9057-41a76202707a)\"" pod="openstack/heat-cfnapi-8676697bb7-dl97q" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.640081 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.640993 4716 scope.go:117] "RemoveContainer" containerID="298e67967e2357f11792268f36281a94f3e3f3ed23603d05f717188970b1c1ce" Dec 09 15:34:18 crc kubenswrapper[4716]: E1209 15:34:18.641316 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-6b76b79b89-8lgpq_openstack(d328b5d1-8e4d-4a23-b326-4ab0fd436482)\"" pod="openstack/heat-api-6b76b79b89-8lgpq" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.641695 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.763675 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" event={"ID":"89424e07-4a8d-47e7-b0f0-4d5161773f94","Type":"ContainerDied","Data":"0c3c9bafffe6d7bc606d4a029859b8ae03d27f643bc27b0166a713f80cc62869"} Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.763723 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-tm6xz" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.763741 4716 scope.go:117] "RemoveContainer" containerID="2ff5d67d362b7047d8a1a4302e41343f2ffeef6fa5dc33b39b0c6a0385d5cca2" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.781701 4716 scope.go:117] "RemoveContainer" containerID="298e67967e2357f11792268f36281a94f3e3f3ed23603d05f717188970b1c1ce" Dec 09 15:34:18 crc kubenswrapper[4716]: E1209 15:34:18.781972 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-6b76b79b89-8lgpq_openstack(d328b5d1-8e4d-4a23-b326-4ab0fd436482)\"" pod="openstack/heat-api-6b76b79b89-8lgpq" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.783776 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"90ad0d77-c429-467b-a32d-46be1ccd1c9b","Type":"ContainerStarted","Data":"e0d58ec45ae37ede2d160ad9a1fb9c1b3778db2b0b316578a9f4c9687e57c876"} Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.784859 4716 scope.go:117] "RemoveContainer" containerID="606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170" Dec 09 15:34:18 crc kubenswrapper[4716]: E1209 15:34:18.785210 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-8676697bb7-dl97q_openstack(917d6c5b-6e8a-4ef6-9057-41a76202707a)\"" pod="openstack/heat-cfnapi-8676697bb7-dl97q" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.802839 4716 scope.go:117] "RemoveContainer" containerID="1f74a13cb12e58636b15e39e154981d688769dcd573ec73f8539709e1c487d2b" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.824109 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.603435172 podStartE2EDuration="31.824084009s" podCreationTimestamp="2025-12-09 15:33:47 +0000 UTC" firstStartedPulling="2025-12-09 15:33:48.410601963 +0000 UTC m=+1515.565345951" lastFinishedPulling="2025-12-09 15:34:17.6312508 +0000 UTC m=+1544.785994788" observedRunningTime="2025-12-09 15:34:18.811216798 +0000 UTC m=+1545.965960776" watchObservedRunningTime="2025-12-09 15:34:18.824084009 +0000 UTC m=+1545.978827997" Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.870732 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-tm6xz"] Dec 09 15:34:18 crc kubenswrapper[4716]: I1209 15:34:18.904374 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-tm6xz"] Dec 09 15:34:19 crc kubenswrapper[4716]: I1209 15:34:19.229309 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" path="/var/lib/kubelet/pods/89424e07-4a8d-47e7-b0f0-4d5161773f94/volumes" Dec 09 15:34:20 crc kubenswrapper[4716]: I1209 15:34:20.537009 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 09 15:34:22 crc kubenswrapper[4716]: I1209 15:34:22.964048 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-5757f44d58-n4z9t" Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.033526 4716 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/heat-api-6b76b79b89-8lgpq"] Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.257049 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-76845fb89c-tcnr6" Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.342450 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-8676697bb7-dl97q"] Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.571930 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7b685d4d7d-v9gv5" Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.649200 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-84cd58648f-wb5xh"] Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.649467 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-84cd58648f-wb5xh" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerName="heat-engine" containerID="cri-o://71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" gracePeriod=60 Dec 09 15:34:23 crc kubenswrapper[4716]: E1209 15:34:23.668253 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 09 15:34:23 crc kubenswrapper[4716]: E1209 15:34:23.669908 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 09 15:34:23 crc kubenswrapper[4716]: E1209 15:34:23.687097 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 09 15:34:23 crc kubenswrapper[4716]: E1209 15:34:23.687183 4716 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-84cd58648f-wb5xh" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerName="heat-engine" Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.852224 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6b76b79b89-8lgpq" event={"ID":"d328b5d1-8e4d-4a23-b326-4ab0fd436482","Type":"ContainerDied","Data":"2e51501a899a07970fa118b63efb401630f99d088c9168f56b561332af17f1ca"} Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.852266 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e51501a899a07970fa118b63efb401630f99d088c9168f56b561332af17f1ca" Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.963941 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:23 crc kubenswrapper[4716]: I1209 15:34:23.970973 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042051 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data\") pod \"917d6c5b-6e8a-4ef6-9057-41a76202707a\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042117 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data-custom\") pod \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042306 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data-custom\") pod \"917d6c5b-6e8a-4ef6-9057-41a76202707a\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042358 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5n6g\" (UniqueName: \"kubernetes.io/projected/d328b5d1-8e4d-4a23-b326-4ab0fd436482-kube-api-access-d5n6g\") pod \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042424 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-combined-ca-bundle\") pod \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042447 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-combined-ca-bundle\") pod \"917d6c5b-6e8a-4ef6-9057-41a76202707a\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042542 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data\") pod \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\" (UID: \"d328b5d1-8e4d-4a23-b326-4ab0fd436482\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.042607 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f85tb\" (UniqueName: \"kubernetes.io/projected/917d6c5b-6e8a-4ef6-9057-41a76202707a-kube-api-access-f85tb\") pod \"917d6c5b-6e8a-4ef6-9057-41a76202707a\" (UID: \"917d6c5b-6e8a-4ef6-9057-41a76202707a\") " Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.060765 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "917d6c5b-6e8a-4ef6-9057-41a76202707a" (UID: "917d6c5b-6e8a-4ef6-9057-41a76202707a"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.060807 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917d6c5b-6e8a-4ef6-9057-41a76202707a-kube-api-access-f85tb" (OuterVolumeSpecName: "kube-api-access-f85tb") pod "917d6c5b-6e8a-4ef6-9057-41a76202707a" (UID: "917d6c5b-6e8a-4ef6-9057-41a76202707a"). InnerVolumeSpecName "kube-api-access-f85tb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.065824 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d328b5d1-8e4d-4a23-b326-4ab0fd436482" (UID: "d328b5d1-8e4d-4a23-b326-4ab0fd436482"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.086759 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "917d6c5b-6e8a-4ef6-9057-41a76202707a" (UID: "917d6c5b-6e8a-4ef6-9057-41a76202707a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.109390 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d328b5d1-8e4d-4a23-b326-4ab0fd436482-kube-api-access-d5n6g" (OuterVolumeSpecName: "kube-api-access-d5n6g") pod "d328b5d1-8e4d-4a23-b326-4ab0fd436482" (UID: "d328b5d1-8e4d-4a23-b326-4ab0fd436482"). InnerVolumeSpecName "kube-api-access-d5n6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.123923 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data" (OuterVolumeSpecName: "config-data") pod "d328b5d1-8e4d-4a23-b326-4ab0fd436482" (UID: "d328b5d1-8e4d-4a23-b326-4ab0fd436482"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.145903 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.145942 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.145953 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f85tb\" (UniqueName: \"kubernetes.io/projected/917d6c5b-6e8a-4ef6-9057-41a76202707a-kube-api-access-f85tb\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.145984 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.146022 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.146033 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5n6g\" (UniqueName: \"kubernetes.io/projected/d328b5d1-8e4d-4a23-b326-4ab0fd436482-kube-api-access-d5n6g\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.168859 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d328b5d1-8e4d-4a23-b326-4ab0fd436482" (UID: "d328b5d1-8e4d-4a23-b326-4ab0fd436482"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.201245 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data" (OuterVolumeSpecName: "config-data") pod "917d6c5b-6e8a-4ef6-9057-41a76202707a" (UID: "917d6c5b-6e8a-4ef6-9057-41a76202707a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.248583 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/917d6c5b-6e8a-4ef6-9057-41a76202707a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.248635 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d328b5d1-8e4d-4a23-b326-4ab0fd436482-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.887208 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-8676697bb7-dl97q" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.887311 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-8676697bb7-dl97q" event={"ID":"917d6c5b-6e8a-4ef6-9057-41a76202707a","Type":"ContainerDied","Data":"0f2135e081445f9f4af58b189452795791ebff94872285ca28d32afc58a5e53b"} Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.890780 4716 scope.go:117] "RemoveContainer" containerID="606adb63ad6431f6c81d5c7e2310514d2f70d1ef40a016bc747b28e5b9d22170" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.894591 4716 generic.go:334] "Generic (PLEG): container finished" podID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerID="71a6de0e1d0befadb78dd44bcdd9582815fde4416a09dc9cc5bd6d85eb645fe1" exitCode=0 Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.894803 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6b76b79b89-8lgpq" Dec 09 15:34:24 crc kubenswrapper[4716]: I1209 15:34:24.896524 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerDied","Data":"71a6de0e1d0befadb78dd44bcdd9582815fde4416a09dc9cc5bd6d85eb645fe1"} Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.129806 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.136817 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6b76b79b89-8lgpq"] Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.164220 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-6b76b79b89-8lgpq"] Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.176004 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-8676697bb7-dl97q"] Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.199645 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-8676697bb7-dl97q"] Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.229015 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" path="/var/lib/kubelet/pods/917d6c5b-6e8a-4ef6-9057-41a76202707a/volumes" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.229825 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" path="/var/lib/kubelet/pods/d328b5d1-8e4d-4a23-b326-4ab0fd436482/volumes" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287277 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-scripts\") pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287341 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-log-httpd\") pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287494 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-config-data\") 
pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287587 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-combined-ca-bundle\") pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287678 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-run-httpd\") pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287754 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95jhm\" (UniqueName: \"kubernetes.io/projected/c2468f42-d745-495c-b068-7ae04e50ee4a-kube-api-access-95jhm\") pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.287786 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-sg-core-conf-yaml\") pod \"c2468f42-d745-495c-b068-7ae04e50ee4a\" (UID: \"c2468f42-d745-495c-b068-7ae04e50ee4a\") " Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.289487 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.292262 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.320039 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-scripts" (OuterVolumeSpecName: "scripts") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.342407 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2468f42-d745-495c-b068-7ae04e50ee4a-kube-api-access-95jhm" (OuterVolumeSpecName: "kube-api-access-95jhm") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "kube-api-access-95jhm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.390884 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95jhm\" (UniqueName: \"kubernetes.io/projected/c2468f42-d745-495c-b068-7ae04e50ee4a-kube-api-access-95jhm\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.390920 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.390932 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.390941 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c2468f42-d745-495c-b068-7ae04e50ee4a-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.396800 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.407998 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.453801 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-config-data" (OuterVolumeSpecName: "config-data") pod "c2468f42-d745-495c-b068-7ae04e50ee4a" (UID: "c2468f42-d745-495c-b068-7ae04e50ee4a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.493800 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.493840 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.493852 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c2468f42-d745-495c-b068-7ae04e50ee4a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.912367 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c2468f42-d745-495c-b068-7ae04e50ee4a","Type":"ContainerDied","Data":"2d88ca049cf872ca4d9673e3d13c6955529af8d3ed52913c4a49e3170645097a"} Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.912443 4716 scope.go:117] "RemoveContainer" containerID="87514f11222283636c35f18fdc67223266921398d321468f28ae5121adc39bed" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.912475 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.942194 4716 scope.go:117] "RemoveContainer" containerID="515154073b07651cc65c5bc467c01016e8b1d94653fd2b5860e4c584f5079e2c" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.969962 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.975829 4716 scope.go:117] "RemoveContainer" containerID="91ea182ae177ab8f6bb25df350936378b78bb701d228df700638dd1aab931a37" Dec 09 15:34:25 crc kubenswrapper[4716]: I1209 15:34:25.987809 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.003161 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.004954 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.004982 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005001 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="proxy-httpd" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005008 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="proxy-httpd" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005025 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005033 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005046 4716 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerName="init" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005051 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerName="init" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005068 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005074 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005084 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9202dd4-4544-4a66-a4da-160c8c918cac" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005089 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9202dd4-4544-4a66-a4da-160c8c918cac" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005104 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-central-agent" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005112 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-central-agent" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005121 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-notification-agent" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005127 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-notification-agent" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005139 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerName="dnsmasq-dns" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005144 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerName="dnsmasq-dns" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005155 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0595a046-577b-4894-8f05-36663a2a4c64" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005161 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="0595a046-577b-4894-8f05-36663a2a4c64" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005181 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="sg-core" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005187 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="sg-core" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005397 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-notification-agent" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005411 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005421 4716 
memory_manager.go:354] "RemoveStaleState removing state" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005429 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005435 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="917d6c5b-6e8a-4ef6-9057-41a76202707a" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005443 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="0595a046-577b-4894-8f05-36663a2a4c64" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005456 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="89424e07-4a8d-47e7-b0f0-4d5161773f94" containerName="dnsmasq-dns" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005466 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="sg-core" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005478 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="ceilometer-central-agent" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005486 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9202dd4-4544-4a66-a4da-160c8c918cac" containerName="heat-cfnapi" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005498 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" containerName="proxy-httpd" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.005795 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.005810 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d328b5d1-8e4d-4a23-b326-4ab0fd436482" containerName="heat-api" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.008361 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.017220 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.022273 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.022398 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.040739 4716 scope.go:117] "RemoveContainer" containerID="71a6de0e1d0befadb78dd44bcdd9582815fde4416a09dc9cc5bd6d85eb645fe1" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.110472 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-run-httpd\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.110883 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-scripts\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.111219 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.111421 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-config-data\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.111577 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6b2h\" (UniqueName: \"kubernetes.io/projected/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-kube-api-access-g6b2h\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.111741 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-log-httpd\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.111896 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220285 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220386 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-config-data\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220446 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6b2h\" (UniqueName: \"kubernetes.io/projected/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-kube-api-access-g6b2h\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220490 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-log-httpd\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220549 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220590 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-run-httpd\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.220634 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-scripts\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.222193 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-log-httpd\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.222440 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-run-httpd\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.229494 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.230017 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.231182 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-config-data\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.231708 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-scripts\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.245340 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6b2h\" (UniqueName: \"kubernetes.io/projected/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-kube-api-access-g6b2h\") pod \"ceilometer-0\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.348694 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.823353 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.825021 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.829165 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 09 15:34:26 crc kubenswrapper[4716]: E1209 15:34:26.829249 4716 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-84cd58648f-wb5xh" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerName="heat-engine" Dec 09 15:34:26 crc kubenswrapper[4716]: W1209 15:34:26.921785 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4af083f7_0a3a_4e5a_90f9_2a7f86fec042.slice/crio-741256c0a2ba627799481cd7c98bc5b861a124067da65e4d30fc6e4861b8d19f WatchSource:0}: Error finding container 741256c0a2ba627799481cd7c98bc5b861a124067da65e4d30fc6e4861b8d19f: Status 404 returned error can't find the container with id 741256c0a2ba627799481cd7c98bc5b861a124067da65e4d30fc6e4861b8d19f Dec 09 15:34:26 crc kubenswrapper[4716]: I1209 15:34:26.951317 4716 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.226612 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2468f42-d745-495c-b068-7ae04e50ee4a" path="/var/lib/kubelet/pods/c2468f42-d745-495c-b068-7ae04e50ee4a/volumes" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.504812 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-448gp"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.506467 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.517861 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-448gp"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.672226 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmc5r\" (UniqueName: \"kubernetes.io/projected/34faeba3-0fa7-439c-888d-98bc7ec01369-kube-api-access-vmc5r\") pod \"nova-api-db-create-448gp\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.672636 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34faeba3-0fa7-439c-888d-98bc7ec01369-operator-scripts\") pod \"nova-api-db-create-448gp\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.704909 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-6jgz9"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.706470 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.721079 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-6jgz9"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.743574 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7a56-account-create-update-shpjz"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.745919 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.765691 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7a56-account-create-update-shpjz"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.772611 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.776152 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34faeba3-0fa7-439c-888d-98bc7ec01369-operator-scripts\") pod \"nova-api-db-create-448gp\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.776396 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmc5r\" (UniqueName: \"kubernetes.io/projected/34faeba3-0fa7-439c-888d-98bc7ec01369-kube-api-access-vmc5r\") pod \"nova-api-db-create-448gp\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.779513 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34faeba3-0fa7-439c-888d-98bc7ec01369-operator-scripts\") pod \"nova-api-db-create-448gp\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.822263 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmc5r\" (UniqueName: \"kubernetes.io/projected/34faeba3-0fa7-439c-888d-98bc7ec01369-kube-api-access-vmc5r\") pod \"nova-api-db-create-448gp\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.829968 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-7j5l9"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.832002 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.855590 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-7j5l9"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.881782 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8zvj\" (UniqueName: \"kubernetes.io/projected/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-kube-api-access-z8zvj\") pod \"nova-cell0-db-create-6jgz9\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.882186 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-operator-scripts\") pod \"nova-cell0-db-create-6jgz9\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.882378 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82rm7\" (UniqueName: \"kubernetes.io/projected/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-kube-api-access-82rm7\") pod \"nova-api-7a56-account-create-update-shpjz\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.882573 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-operator-scripts\") pod \"nova-api-7a56-account-create-update-shpjz\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.933262 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.966275 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerStarted","Data":"7bcd9a422e3f6c94879f6d05238825cd8a88efbbb064ea62af0a135bfb6b7b0b"} Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.966325 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerStarted","Data":"741256c0a2ba627799481cd7c98bc5b861a124067da65e4d30fc6e4861b8d19f"} Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.985569 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-operator-scripts\") pod \"nova-cell0-db-create-6jgz9\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.985689 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82rm7\" (UniqueName: \"kubernetes.io/projected/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-kube-api-access-82rm7\") pod \"nova-api-7a56-account-create-update-shpjz\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.985757 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-operator-scripts\") pod \"nova-api-7a56-account-create-update-shpjz\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.985816 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-operator-scripts\") pod \"nova-cell1-db-create-7j5l9\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.985874 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-578f9\" (UniqueName: \"kubernetes.io/projected/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-kube-api-access-578f9\") pod \"nova-cell1-db-create-7j5l9\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.985986 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8zvj\" (UniqueName: \"kubernetes.io/projected/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-kube-api-access-z8zvj\") pod \"nova-cell0-db-create-6jgz9\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.986672 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-operator-scripts\") pod \"nova-api-7a56-account-create-update-shpjz\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:27 
crc kubenswrapper[4716]: I1209 15:34:27.987037 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-operator-scripts\") pod \"nova-cell0-db-create-6jgz9\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.992412 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-30c9-account-create-update-vtv2g"] Dec 09 15:34:27 crc kubenswrapper[4716]: I1209 15:34:27.994442 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.010142 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.022095 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8zvj\" (UniqueName: \"kubernetes.io/projected/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-kube-api-access-z8zvj\") pod \"nova-cell0-db-create-6jgz9\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.025605 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82rm7\" (UniqueName: \"kubernetes.io/projected/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-kube-api-access-82rm7\") pod \"nova-api-7a56-account-create-update-shpjz\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.042234 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.062704 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-30c9-account-create-update-vtv2g"] Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.091102 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-operator-scripts\") pod \"nova-cell1-db-create-7j5l9\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.091189 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-578f9\" (UniqueName: \"kubernetes.io/projected/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-kube-api-access-578f9\") pod \"nova-cell1-db-create-7j5l9\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.091225 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c52dde-aad6-413a-9d1f-5bd7078372b4-operator-scripts\") pod \"nova-cell0-30c9-account-create-update-vtv2g\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.091303 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qszgt\" (UniqueName: \"kubernetes.io/projected/35c52dde-aad6-413a-9d1f-5bd7078372b4-kube-api-access-qszgt\") pod \"nova-cell0-30c9-account-create-update-vtv2g\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.091898 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-operator-scripts\") pod \"nova-cell1-db-create-7j5l9\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.092446 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.140065 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-578f9\" (UniqueName: \"kubernetes.io/projected/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-kube-api-access-578f9\") pod \"nova-cell1-db-create-7j5l9\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.194011 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c52dde-aad6-413a-9d1f-5bd7078372b4-operator-scripts\") pod \"nova-cell0-30c9-account-create-update-vtv2g\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.194143 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qszgt\" (UniqueName: \"kubernetes.io/projected/35c52dde-aad6-413a-9d1f-5bd7078372b4-kube-api-access-qszgt\") pod \"nova-cell0-30c9-account-create-update-vtv2g\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.195419 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c52dde-aad6-413a-9d1f-5bd7078372b4-operator-scripts\") pod \"nova-cell0-30c9-account-create-update-vtv2g\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.211084 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.245321 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-e5db-account-create-update-gk6pn"] Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.247520 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qszgt\" (UniqueName: \"kubernetes.io/projected/35c52dde-aad6-413a-9d1f-5bd7078372b4-kube-api-access-qszgt\") pod \"nova-cell0-30c9-account-create-update-vtv2g\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.250426 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.264104 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.289440 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-e5db-account-create-update-gk6pn"] Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.412168 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.438336 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb4wn\" (UniqueName: \"kubernetes.io/projected/a0262168-a0cf-42c8-9ec5-cdb237c71db3-kube-api-access-mb4wn\") pod \"nova-cell1-e5db-account-create-update-gk6pn\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.438486 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0262168-a0cf-42c8-9ec5-cdb237c71db3-operator-scripts\") pod \"nova-cell1-e5db-account-create-update-gk6pn\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.540811 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb4wn\" (UniqueName: \"kubernetes.io/projected/a0262168-a0cf-42c8-9ec5-cdb237c71db3-kube-api-access-mb4wn\") pod \"nova-cell1-e5db-account-create-update-gk6pn\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.541303 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0262168-a0cf-42c8-9ec5-cdb237c71db3-operator-scripts\") pod \"nova-cell1-e5db-account-create-update-gk6pn\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.546285 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0262168-a0cf-42c8-9ec5-cdb237c71db3-operator-scripts\") pod \"nova-cell1-e5db-account-create-update-gk6pn\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.595366 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb4wn\" (UniqueName: \"kubernetes.io/projected/a0262168-a0cf-42c8-9ec5-cdb237c71db3-kube-api-access-mb4wn\") pod \"nova-cell1-e5db-account-create-update-gk6pn\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:28 crc kubenswrapper[4716]: I1209 15:34:28.714957 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.009241 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-6jgz9"] Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.059751 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerStarted","Data":"bf9d572dbce63d3ce353ec411aab2b7be0f0a93d4e21405e44472322334d26bd"} Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.166829 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-448gp"] Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.485808 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7a56-account-create-update-shpjz"] Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.624355 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-7j5l9"] Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.719773 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-30c9-account-create-update-vtv2g"] Dec 09 15:34:29 crc kubenswrapper[4716]: I1209 15:34:29.729448 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-e5db-account-create-update-gk6pn"] Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.111733 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-448gp" event={"ID":"34faeba3-0fa7-439c-888d-98bc7ec01369","Type":"ContainerStarted","Data":"79cf87c2a5c48521ab559f03622394647a4573e3926b0ae5e46e6b0c396dfbb6"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.111796 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-448gp" event={"ID":"34faeba3-0fa7-439c-888d-98bc7ec01369","Type":"ContainerStarted","Data":"f67563aafbc566dfbf1af541407782cba6bdf7b9bdaf36816b1c4432c8b1e420"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.136479 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" event={"ID":"a0262168-a0cf-42c8-9ec5-cdb237c71db3","Type":"ContainerStarted","Data":"67b0b43296ec6b50302a2bf5f02cf97bbc69733172ccc4d8160c8e022fd3f527"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.136539 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" event={"ID":"a0262168-a0cf-42c8-9ec5-cdb237c71db3","Type":"ContainerStarted","Data":"21369d44d8b02aac3e342b280b4cc2e655add83ce4b005c5b0fd0cb5303fc3bc"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.142174 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6jgz9" event={"ID":"ed21be51-d0c7-4c2b-ac68-c22f8664dea8","Type":"ContainerStarted","Data":"b8b37f2265ddc9bdd7fee3e78d96e3fb58abd7ce772c979f4dc7853e16e9018a"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.142227 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6jgz9" event={"ID":"ed21be51-d0c7-4c2b-ac68-c22f8664dea8","Type":"ContainerStarted","Data":"fd24339a682bf564eec6e77e42eaaef5dac8851322ec355888029b326eda2b7d"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.158982 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-448gp" podStartSLOduration=3.158958184 podStartE2EDuration="3.158958184s" 
podCreationTimestamp="2025-12-09 15:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:30.144178088 +0000 UTC m=+1557.298922086" watchObservedRunningTime="2025-12-09 15:34:30.158958184 +0000 UTC m=+1557.313702172" Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.162783 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" event={"ID":"35c52dde-aad6-413a-9d1f-5bd7078372b4","Type":"ContainerStarted","Data":"d5ea2f329b5face113ee9b7f45d706f93184ac67550a181dd64cfe075ca17459"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.162838 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" event={"ID":"35c52dde-aad6-413a-9d1f-5bd7078372b4","Type":"ContainerStarted","Data":"8a1e3bacecafa72bcb89b2e0d543212ebb713d2a4e29298f1399db60f7c6641e"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.187577 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7j5l9" event={"ID":"3e5ab44d-6c6f-46d2-829d-a3699d131e2d","Type":"ContainerStarted","Data":"944e2b0ba86a1ee0740e595edc8a7f14470317f47f4d621d65711a13e2562075"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.187642 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7j5l9" event={"ID":"3e5ab44d-6c6f-46d2-829d-a3699d131e2d","Type":"ContainerStarted","Data":"375ef45b926bd8c041a5001b199e7b8d57330b6328fc56bd99de71f580a6e4fa"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.190415 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-6jgz9" podStartSLOduration=3.19039374 podStartE2EDuration="3.19039374s" podCreationTimestamp="2025-12-09 15:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:30.174533873 +0000 UTC m=+1557.329277871" watchObservedRunningTime="2025-12-09 15:34:30.19039374 +0000 UTC m=+1557.345137728" Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.207880 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7a56-account-create-update-shpjz" event={"ID":"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb","Type":"ContainerStarted","Data":"b68e0ace6fd0a5bf7ff95ee992b7aa2eed76561ada598c44b469dd9e698ead2b"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.207935 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7a56-account-create-update-shpjz" event={"ID":"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb","Type":"ContainerStarted","Data":"b06a85b4e00dda5dbf106eb25f610c16cbaecf156ba3bb03ffcc749447c002dd"} Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.214046 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" podStartSLOduration=2.2140232810000002 podStartE2EDuration="2.214023281s" podCreationTimestamp="2025-12-09 15:34:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:30.193134879 +0000 UTC m=+1557.347878867" watchObservedRunningTime="2025-12-09 15:34:30.214023281 +0000 UTC m=+1557.368767269" Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.236070 4716 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/nova-cell1-db-create-7j5l9" podStartSLOduration=3.236043956 podStartE2EDuration="3.236043956s" podCreationTimestamp="2025-12-09 15:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:30.218223472 +0000 UTC m=+1557.372967460" watchObservedRunningTime="2025-12-09 15:34:30.236043956 +0000 UTC m=+1557.390787954" Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.249699 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" podStartSLOduration=3.249673119 podStartE2EDuration="3.249673119s" podCreationTimestamp="2025-12-09 15:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:30.241209005 +0000 UTC m=+1557.395952993" watchObservedRunningTime="2025-12-09 15:34:30.249673119 +0000 UTC m=+1557.404417107" Dec 09 15:34:30 crc kubenswrapper[4716]: I1209 15:34:30.293834 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-7a56-account-create-update-shpjz" podStartSLOduration=3.293808286 podStartE2EDuration="3.293808286s" podCreationTimestamp="2025-12-09 15:34:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:34:30.271579929 +0000 UTC m=+1557.426323917" watchObservedRunningTime="2025-12-09 15:34:30.293808286 +0000 UTC m=+1557.448552274" Dec 09 15:34:30 crc kubenswrapper[4716]: W1209 15:34:30.859882 4716 container.go:586] Failed to update stats for container "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6b6ca6f_0338_48e9_9397_61cf76346b26.slice/crio-4ac307b276ca1515e9e9ac091c095779101555a0cba2ae6cc95f769bbfa1e629": error while statting cgroup v2: [openat2 /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6b6ca6f_0338_48e9_9397_61cf76346b26.slice/crio-4ac307b276ca1515e9e9ac091c095779101555a0cba2ae6cc95f769bbfa1e629/pids.current: no such device], continuing to push stats Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.252310 4716 generic.go:334] "Generic (PLEG): container finished" podID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.252370 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-84cd58648f-wb5xh" event={"ID":"b6b6ca6f-0338-48e9-9397-61cf76346b26","Type":"ContainerDied","Data":"71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.259849 4716 generic.go:334] "Generic (PLEG): container finished" podID="a0262168-a0cf-42c8-9ec5-cdb237c71db3" containerID="67b0b43296ec6b50302a2bf5f02cf97bbc69733172ccc4d8160c8e022fd3f527" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.259967 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" event={"ID":"a0262168-a0cf-42c8-9ec5-cdb237c71db3","Type":"ContainerDied","Data":"67b0b43296ec6b50302a2bf5f02cf97bbc69733172ccc4d8160c8e022fd3f527"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.265817 4716 generic.go:334] "Generic (PLEG): container finished" podID="ed21be51-d0c7-4c2b-ac68-c22f8664dea8" 
containerID="b8b37f2265ddc9bdd7fee3e78d96e3fb58abd7ce772c979f4dc7853e16e9018a" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.266038 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6jgz9" event={"ID":"ed21be51-d0c7-4c2b-ac68-c22f8664dea8","Type":"ContainerDied","Data":"b8b37f2265ddc9bdd7fee3e78d96e3fb58abd7ce772c979f4dc7853e16e9018a"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.283329 4716 generic.go:334] "Generic (PLEG): container finished" podID="35c52dde-aad6-413a-9d1f-5bd7078372b4" containerID="d5ea2f329b5face113ee9b7f45d706f93184ac67550a181dd64cfe075ca17459" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.283471 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" event={"ID":"35c52dde-aad6-413a-9d1f-5bd7078372b4","Type":"ContainerDied","Data":"d5ea2f329b5face113ee9b7f45d706f93184ac67550a181dd64cfe075ca17459"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.286787 4716 generic.go:334] "Generic (PLEG): container finished" podID="3e5ab44d-6c6f-46d2-829d-a3699d131e2d" containerID="944e2b0ba86a1ee0740e595edc8a7f14470317f47f4d621d65711a13e2562075" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.286879 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7j5l9" event={"ID":"3e5ab44d-6c6f-46d2-829d-a3699d131e2d","Type":"ContainerDied","Data":"944e2b0ba86a1ee0740e595edc8a7f14470317f47f4d621d65711a13e2562075"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.303567 4716 generic.go:334] "Generic (PLEG): container finished" podID="2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" containerID="b68e0ace6fd0a5bf7ff95ee992b7aa2eed76561ada598c44b469dd9e698ead2b" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.303700 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7a56-account-create-update-shpjz" event={"ID":"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb","Type":"ContainerDied","Data":"b68e0ace6fd0a5bf7ff95ee992b7aa2eed76561ada598c44b469dd9e698ead2b"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.334278 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerStarted","Data":"d10512ac7f578c25ebfd5913915120958818416f9abe76a3f8d5ae0df03acea7"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.361825 4716 generic.go:334] "Generic (PLEG): container finished" podID="34faeba3-0fa7-439c-888d-98bc7ec01369" containerID="79cf87c2a5c48521ab559f03622394647a4573e3926b0ae5e46e6b0c396dfbb6" exitCode=0 Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.361887 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-448gp" event={"ID":"34faeba3-0fa7-439c-888d-98bc7ec01369","Type":"ContainerDied","Data":"79cf87c2a5c48521ab559f03622394647a4573e3926b0ae5e46e6b0c396dfbb6"} Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.545169 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.743286 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nst8v\" (UniqueName: \"kubernetes.io/projected/b6b6ca6f-0338-48e9-9397-61cf76346b26-kube-api-access-nst8v\") pod \"b6b6ca6f-0338-48e9-9397-61cf76346b26\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.743332 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-combined-ca-bundle\") pod \"b6b6ca6f-0338-48e9-9397-61cf76346b26\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.743476 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data\") pod \"b6b6ca6f-0338-48e9-9397-61cf76346b26\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.743526 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data-custom\") pod \"b6b6ca6f-0338-48e9-9397-61cf76346b26\" (UID: \"b6b6ca6f-0338-48e9-9397-61cf76346b26\") " Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.766478 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6b6ca6f-0338-48e9-9397-61cf76346b26-kube-api-access-nst8v" (OuterVolumeSpecName: "kube-api-access-nst8v") pod "b6b6ca6f-0338-48e9-9397-61cf76346b26" (UID: "b6b6ca6f-0338-48e9-9397-61cf76346b26"). InnerVolumeSpecName "kube-api-access-nst8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.772726 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b6b6ca6f-0338-48e9-9397-61cf76346b26" (UID: "b6b6ca6f-0338-48e9-9397-61cf76346b26"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.847335 4716 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.847376 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nst8v\" (UniqueName: \"kubernetes.io/projected/b6b6ca6f-0338-48e9-9397-61cf76346b26-kube-api-access-nst8v\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.872868 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6b6ca6f-0338-48e9-9397-61cf76346b26" (UID: "b6b6ca6f-0338-48e9-9397-61cf76346b26"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.950330 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:31 crc kubenswrapper[4716]: I1209 15:34:31.950479 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data" (OuterVolumeSpecName: "config-data") pod "b6b6ca6f-0338-48e9-9397-61cf76346b26" (UID: "b6b6ca6f-0338-48e9-9397-61cf76346b26"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.053046 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b6ca6f-0338-48e9-9397-61cf76346b26-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.378463 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerStarted","Data":"2519e43f41ec24c433b05e3de41ae90f170aa287a361582d4eeb737c62896ebe"} Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.378575 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.382383 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-84cd58648f-wb5xh" event={"ID":"b6b6ca6f-0338-48e9-9397-61cf76346b26","Type":"ContainerDied","Data":"4ac307b276ca1515e9e9ac091c095779101555a0cba2ae6cc95f769bbfa1e629"} Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.382499 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-84cd58648f-wb5xh" Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.382772 4716 scope.go:117] "RemoveContainer" containerID="71b4f9b02e1d185c13525910655145a0e1b13a012bd4ead62a4f7aed5f7719f6" Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.428641 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.6247933420000003 podStartE2EDuration="7.428566342s" podCreationTimestamp="2025-12-09 15:34:25 +0000 UTC" firstStartedPulling="2025-12-09 15:34:26.925066449 +0000 UTC m=+1554.079810437" lastFinishedPulling="2025-12-09 15:34:31.728839439 +0000 UTC m=+1558.883583437" observedRunningTime="2025-12-09 15:34:32.408699899 +0000 UTC m=+1559.563443887" watchObservedRunningTime="2025-12-09 15:34:32.428566342 +0000 UTC m=+1559.583310330" Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.470764 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-84cd58648f-wb5xh"] Dec 09 15:34:32 crc kubenswrapper[4716]: I1209 15:34:32.496319 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-84cd58648f-wb5xh"] Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.029218 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.195998 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34faeba3-0fa7-439c-888d-98bc7ec01369-operator-scripts\") pod \"34faeba3-0fa7-439c-888d-98bc7ec01369\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.215639 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34faeba3-0fa7-439c-888d-98bc7ec01369-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "34faeba3-0fa7-439c-888d-98bc7ec01369" (UID: "34faeba3-0fa7-439c-888d-98bc7ec01369"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.229545 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmc5r\" (UniqueName: \"kubernetes.io/projected/34faeba3-0fa7-439c-888d-98bc7ec01369-kube-api-access-vmc5r\") pod \"34faeba3-0fa7-439c-888d-98bc7ec01369\" (UID: \"34faeba3-0fa7-439c-888d-98bc7ec01369\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.233047 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/34faeba3-0fa7-439c-888d-98bc7ec01369-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.241929 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34faeba3-0fa7-439c-888d-98bc7ec01369-kube-api-access-vmc5r" (OuterVolumeSpecName: "kube-api-access-vmc5r") pod "34faeba3-0fa7-439c-888d-98bc7ec01369" (UID: "34faeba3-0fa7-439c-888d-98bc7ec01369"). InnerVolumeSpecName "kube-api-access-vmc5r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.295046 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" path="/var/lib/kubelet/pods/b6b6ca6f-0338-48e9-9397-61cf76346b26/volumes" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.337917 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmc5r\" (UniqueName: \"kubernetes.io/projected/34faeba3-0fa7-439c-888d-98bc7ec01369-kube-api-access-vmc5r\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.429888 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7j5l9" event={"ID":"3e5ab44d-6c6f-46d2-829d-a3699d131e2d","Type":"ContainerDied","Data":"375ef45b926bd8c041a5001b199e7b8d57330b6328fc56bd99de71f580a6e4fa"} Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.429927 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="375ef45b926bd8c041a5001b199e7b8d57330b6328fc56bd99de71f580a6e4fa" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.443886 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-448gp" event={"ID":"34faeba3-0fa7-439c-888d-98bc7ec01369","Type":"ContainerDied","Data":"f67563aafbc566dfbf1af541407782cba6bdf7b9bdaf36816b1c4432c8b1e420"} Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.443939 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f67563aafbc566dfbf1af541407782cba6bdf7b9bdaf36816b1c4432c8b1e420" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.444011 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-448gp" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.456780 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" event={"ID":"a0262168-a0cf-42c8-9ec5-cdb237c71db3","Type":"ContainerDied","Data":"21369d44d8b02aac3e342b280b4cc2e655add83ce4b005c5b0fd0cb5303fc3bc"} Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.456887 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21369d44d8b02aac3e342b280b4cc2e655add83ce4b005c5b0fd0cb5303fc3bc" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.601002 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.604923 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.635876 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.654248 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-operator-scripts\") pod \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.654439 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0262168-a0cf-42c8-9ec5-cdb237c71db3-operator-scripts\") pod \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.654253 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.654773 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e5ab44d-6c6f-46d2-829d-a3699d131e2d" (UID: "3e5ab44d-6c6f-46d2-829d-a3699d131e2d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.655367 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0262168-a0cf-42c8-9ec5-cdb237c71db3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a0262168-a0cf-42c8-9ec5-cdb237c71db3" (UID: "a0262168-a0cf-42c8-9ec5-cdb237c71db3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.664372 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mb4wn\" (UniqueName: \"kubernetes.io/projected/a0262168-a0cf-42c8-9ec5-cdb237c71db3-kube-api-access-mb4wn\") pod \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\" (UID: \"a0262168-a0cf-42c8-9ec5-cdb237c71db3\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.664675 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-578f9\" (UniqueName: \"kubernetes.io/projected/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-kube-api-access-578f9\") pod \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\" (UID: \"3e5ab44d-6c6f-46d2-829d-a3699d131e2d\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.665721 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.665748 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a0262168-a0cf-42c8-9ec5-cdb237c71db3-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.682036 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-kube-api-access-578f9" (OuterVolumeSpecName: "kube-api-access-578f9") pod "3e5ab44d-6c6f-46d2-829d-a3699d131e2d" (UID: "3e5ab44d-6c6f-46d2-829d-a3699d131e2d"). 
InnerVolumeSpecName "kube-api-access-578f9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.682778 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.684876 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0262168-a0cf-42c8-9ec5-cdb237c71db3-kube-api-access-mb4wn" (OuterVolumeSpecName: "kube-api-access-mb4wn") pod "a0262168-a0cf-42c8-9ec5-cdb237c71db3" (UID: "a0262168-a0cf-42c8-9ec5-cdb237c71db3"). InnerVolumeSpecName "kube-api-access-mb4wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.767558 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-operator-scripts\") pod \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.767955 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qszgt\" (UniqueName: \"kubernetes.io/projected/35c52dde-aad6-413a-9d1f-5bd7078372b4-kube-api-access-qszgt\") pod \"35c52dde-aad6-413a-9d1f-5bd7078372b4\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.768510 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8zvj\" (UniqueName: \"kubernetes.io/projected/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-kube-api-access-z8zvj\") pod \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\" (UID: \"ed21be51-d0c7-4c2b-ac68-c22f8664dea8\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.768509 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ed21be51-d0c7-4c2b-ac68-c22f8664dea8" (UID: "ed21be51-d0c7-4c2b-ac68-c22f8664dea8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.768809 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c52dde-aad6-413a-9d1f-5bd7078372b4-operator-scripts\") pod \"35c52dde-aad6-413a-9d1f-5bd7078372b4\" (UID: \"35c52dde-aad6-413a-9d1f-5bd7078372b4\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.768878 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82rm7\" (UniqueName: \"kubernetes.io/projected/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-kube-api-access-82rm7\") pod \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.768956 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-operator-scripts\") pod \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\" (UID: \"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb\") " Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.769389 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35c52dde-aad6-413a-9d1f-5bd7078372b4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "35c52dde-aad6-413a-9d1f-5bd7078372b4" (UID: "35c52dde-aad6-413a-9d1f-5bd7078372b4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.769881 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" (UID: "2ed85f3c-c70d-4400-8e17-28f12f0a4dbb"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.770257 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35c52dde-aad6-413a-9d1f-5bd7078372b4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.770294 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mb4wn\" (UniqueName: \"kubernetes.io/projected/a0262168-a0cf-42c8-9ec5-cdb237c71db3-kube-api-access-mb4wn\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.770314 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.770325 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.770340 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-578f9\" (UniqueName: \"kubernetes.io/projected/3e5ab44d-6c6f-46d2-829d-a3699d131e2d-kube-api-access-578f9\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.771936 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35c52dde-aad6-413a-9d1f-5bd7078372b4-kube-api-access-qszgt" (OuterVolumeSpecName: "kube-api-access-qszgt") pod "35c52dde-aad6-413a-9d1f-5bd7078372b4" (UID: "35c52dde-aad6-413a-9d1f-5bd7078372b4"). InnerVolumeSpecName "kube-api-access-qszgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.777910 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-kube-api-access-82rm7" (OuterVolumeSpecName: "kube-api-access-82rm7") pod "2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" (UID: "2ed85f3c-c70d-4400-8e17-28f12f0a4dbb"). InnerVolumeSpecName "kube-api-access-82rm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.779465 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-kube-api-access-z8zvj" (OuterVolumeSpecName: "kube-api-access-z8zvj") pod "ed21be51-d0c7-4c2b-ac68-c22f8664dea8" (UID: "ed21be51-d0c7-4c2b-ac68-c22f8664dea8"). InnerVolumeSpecName "kube-api-access-z8zvj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.872790 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82rm7\" (UniqueName: \"kubernetes.io/projected/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb-kube-api-access-82rm7\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.872833 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qszgt\" (UniqueName: \"kubernetes.io/projected/35c52dde-aad6-413a-9d1f-5bd7078372b4-kube-api-access-qszgt\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:33 crc kubenswrapper[4716]: I1209 15:34:33.872849 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8zvj\" (UniqueName: \"kubernetes.io/projected/ed21be51-d0c7-4c2b-ac68-c22f8664dea8-kube-api-access-z8zvj\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.467238 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7a56-account-create-update-shpjz" event={"ID":"2ed85f3c-c70d-4400-8e17-28f12f0a4dbb","Type":"ContainerDied","Data":"b06a85b4e00dda5dbf106eb25f610c16cbaecf156ba3bb03ffcc749447c002dd"} Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.467301 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b06a85b4e00dda5dbf106eb25f610c16cbaecf156ba3bb03ffcc749447c002dd" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.467301 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7a56-account-create-update-shpjz" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.469272 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-6jgz9" event={"ID":"ed21be51-d0c7-4c2b-ac68-c22f8664dea8","Type":"ContainerDied","Data":"fd24339a682bf564eec6e77e42eaaef5dac8851322ec355888029b326eda2b7d"} Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.469303 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd24339a682bf564eec6e77e42eaaef5dac8851322ec355888029b326eda2b7d" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.469278 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-6jgz9" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.470770 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" event={"ID":"35c52dde-aad6-413a-9d1f-5bd7078372b4","Type":"ContainerDied","Data":"8a1e3bacecafa72bcb89b2e0d543212ebb713d2a4e29298f1399db60f7c6641e"} Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.470796 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a1e3bacecafa72bcb89b2e0d543212ebb713d2a4e29298f1399db60f7c6641e" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.470808 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7j5l9" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.470834 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e5db-account-create-update-gk6pn" Dec 09 15:34:34 crc kubenswrapper[4716]: I1209 15:34:34.470897 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-30c9-account-create-update-vtv2g" Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.140761 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.141322 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-central-agent" containerID="cri-o://7bcd9a422e3f6c94879f6d05238825cd8a88efbbb064ea62af0a135bfb6b7b0b" gracePeriod=30 Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.141407 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="proxy-httpd" containerID="cri-o://2519e43f41ec24c433b05e3de41ae90f170aa287a361582d4eeb737c62896ebe" gracePeriod=30 Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.141489 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-notification-agent" containerID="cri-o://bf9d572dbce63d3ce353ec411aab2b7be0f0a93d4e21405e44472322334d26bd" gracePeriod=30 Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.141470 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="sg-core" containerID="cri-o://d10512ac7f578c25ebfd5913915120958818416f9abe76a3f8d5ae0df03acea7" gracePeriod=30 Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.485634 4716 generic.go:334] "Generic (PLEG): container finished" podID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerID="2519e43f41ec24c433b05e3de41ae90f170aa287a361582d4eeb737c62896ebe" exitCode=0 Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.485708 4716 generic.go:334] "Generic (PLEG): container finished" podID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerID="d10512ac7f578c25ebfd5913915120958818416f9abe76a3f8d5ae0df03acea7" exitCode=2 Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.485660 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerDied","Data":"2519e43f41ec24c433b05e3de41ae90f170aa287a361582d4eeb737c62896ebe"} Dec 09 15:34:35 crc kubenswrapper[4716]: I1209 15:34:35.485758 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerDied","Data":"d10512ac7f578c25ebfd5913915120958818416f9abe76a3f8d5ae0df03acea7"} Dec 09 15:34:36 crc kubenswrapper[4716]: I1209 15:34:36.500157 4716 generic.go:334] "Generic (PLEG): container finished" podID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerID="bf9d572dbce63d3ce353ec411aab2b7be0f0a93d4e21405e44472322334d26bd" exitCode=0 Dec 09 15:34:36 crc kubenswrapper[4716]: I1209 15:34:36.500229 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerDied","Data":"bf9d572dbce63d3ce353ec411aab2b7be0f0a93d4e21405e44472322334d26bd"} Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.268310 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"] Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.270077 4716 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="3e5ab44d-6c6f-46d2-829d-a3699d131e2d" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.270196 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e5ab44d-6c6f-46d2-829d-a3699d131e2d" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.270282 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35c52dde-aad6-413a-9d1f-5bd7078372b4" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.270348 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="35c52dde-aad6-413a-9d1f-5bd7078372b4" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.270425 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0262168-a0cf-42c8-9ec5-cdb237c71db3" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.270490 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0262168-a0cf-42c8-9ec5-cdb237c71db3" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.270564 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.270641 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.270738 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed21be51-d0c7-4c2b-ac68-c22f8664dea8" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.270818 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed21be51-d0c7-4c2b-ac68-c22f8664dea8" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.270922 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34faeba3-0fa7-439c-888d-98bc7ec01369" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.270995 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="34faeba3-0fa7-439c-888d-98bc7ec01369" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: E1209 15:34:38.271064 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerName="heat-engine" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271124 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerName="heat-engine" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271412 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6b6ca6f-0338-48e9-9397-61cf76346b26" containerName="heat-engine" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271491 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="34faeba3-0fa7-439c-888d-98bc7ec01369" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271562 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271658 4716 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="a0262168-a0cf-42c8-9ec5-cdb237c71db3" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271763 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e5ab44d-6c6f-46d2-829d-a3699d131e2d" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271840 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed21be51-d0c7-4c2b-ac68-c22f8664dea8" containerName="mariadb-database-create" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.271905 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="35c52dde-aad6-413a-9d1f-5bd7078372b4" containerName="mariadb-account-create-update" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.272894 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.276548 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.276606 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-nnrk9" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.277132 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.303689 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"] Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.383183 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-config-data\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.383304 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn75s\" (UniqueName: \"kubernetes.io/projected/361ee6cb-d557-4f46-8cf8-da9c121604e2-kube-api-access-jn75s\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.383441 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-scripts\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.383560 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.485930 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.486146 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-config-data\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.486206 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn75s\" (UniqueName: \"kubernetes.io/projected/361ee6cb-d557-4f46-8cf8-da9c121604e2-kube-api-access-jn75s\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.486311 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-scripts\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.492395 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-scripts\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.492427 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.492614 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-config-data\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.506452 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn75s\" (UniqueName: \"kubernetes.io/projected/361ee6cb-d557-4f46-8cf8-da9c121604e2-kube-api-access-jn75s\") pod \"nova-cell0-conductor-db-sync-dm4wb\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") " pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:38 crc kubenswrapper[4716]: I1209 15:34:38.627645 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" Dec 09 15:34:39 crc kubenswrapper[4716]: I1209 15:34:39.404237 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"] Dec 09 15:34:39 crc kubenswrapper[4716]: I1209 15:34:39.538046 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" event={"ID":"361ee6cb-d557-4f46-8cf8-da9c121604e2","Type":"ContainerStarted","Data":"0f9704e99c4c412f73d36398557de0d4dcc7217eaa73da6d815852d27a5b8575"} Dec 09 15:34:46 crc kubenswrapper[4716]: I1209 15:34:46.633146 4716 generic.go:334] "Generic (PLEG): container finished" podID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerID="7bcd9a422e3f6c94879f6d05238825cd8a88efbbb064ea62af0a135bfb6b7b0b" exitCode=0 Dec 09 15:34:46 crc kubenswrapper[4716]: I1209 15:34:46.633250 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerDied","Data":"7bcd9a422e3f6c94879f6d05238825cd8a88efbbb064ea62af0a135bfb6b7b0b"} Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.817914 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887076 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-log-httpd\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887375 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6b2h\" (UniqueName: \"kubernetes.io/projected/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-kube-api-access-g6b2h\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887408 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-scripts\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887504 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-combined-ca-bundle\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887545 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-sg-core-conf-yaml\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887652 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-run-httpd\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.887682 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-config-data\") pod \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\" (UID: \"4af083f7-0a3a-4e5a-90f9-2a7f86fec042\") " Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.888207 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.888236 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.889492 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.889524 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.898860 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-kube-api-access-g6b2h" (OuterVolumeSpecName: "kube-api-access-g6b2h") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "kube-api-access-g6b2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.901206 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-scripts" (OuterVolumeSpecName: "scripts") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.936411 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.991846 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.991926 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6b2h\" (UniqueName: \"kubernetes.io/projected/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-kube-api-access-g6b2h\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:48 crc kubenswrapper[4716]: I1209 15:34:48.991940 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.030868 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.039903 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-config-data" (OuterVolumeSpecName: "config-data") pod "4af083f7-0a3a-4e5a-90f9-2a7f86fec042" (UID: "4af083f7-0a3a-4e5a-90f9-2a7f86fec042"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.094043 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.094088 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4af083f7-0a3a-4e5a-90f9-2a7f86fec042-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.682802 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4af083f7-0a3a-4e5a-90f9-2a7f86fec042","Type":"ContainerDied","Data":"741256c0a2ba627799481cd7c98bc5b861a124067da65e4d30fc6e4861b8d19f"} Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.683221 4716 scope.go:117] "RemoveContainer" containerID="2519e43f41ec24c433b05e3de41ae90f170aa287a361582d4eeb737c62896ebe" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.683034 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.687278 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" event={"ID":"361ee6cb-d557-4f46-8cf8-da9c121604e2","Type":"ContainerStarted","Data":"cd06bf863ee800bf49b74513ca377f9acf689ffa0e5dfe55a5264f68c1fe45ac"} Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.710124 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.714898 4716 scope.go:117] "RemoveContainer" containerID="d10512ac7f578c25ebfd5913915120958818416f9abe76a3f8d5ae0df03acea7" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.720785 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.738900 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:49 crc kubenswrapper[4716]: E1209 15:34:49.739635 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-central-agent" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.739653 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-central-agent" Dec 09 15:34:49 crc kubenswrapper[4716]: E1209 15:34:49.739691 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="sg-core" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.739697 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="sg-core" Dec 09 15:34:49 crc kubenswrapper[4716]: E1209 15:34:49.739717 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-notification-agent" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.739723 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-notification-agent" Dec 09 15:34:49 crc kubenswrapper[4716]: E1209 15:34:49.739750 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="proxy-httpd" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.739757 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="proxy-httpd" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.739980 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="sg-core" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.739990 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-notification-agent" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.740008 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="proxy-httpd" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.740017 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" containerName="ceilometer-central-agent" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.742118 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.745075 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.745428 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.745757 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" podStartSLOduration=2.706621792 podStartE2EDuration="11.745731562s" podCreationTimestamp="2025-12-09 15:34:38 +0000 UTC" firstStartedPulling="2025-12-09 15:34:39.428153714 +0000 UTC m=+1566.582897702" lastFinishedPulling="2025-12-09 15:34:48.467263484 +0000 UTC m=+1575.622007472" observedRunningTime="2025-12-09 15:34:49.727109975 +0000 UTC m=+1576.881853963" watchObservedRunningTime="2025-12-09 15:34:49.745731562 +0000 UTC m=+1576.900475550" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.752058 4716 scope.go:117] "RemoveContainer" containerID="bf9d572dbce63d3ce353ec411aab2b7be0f0a93d4e21405e44472322334d26bd" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.793194 4716 scope.go:117] "RemoveContainer" containerID="7bcd9a422e3f6c94879f6d05238825cd8a88efbbb064ea62af0a135bfb6b7b0b" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810325 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810404 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-scripts\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810470 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf6gp\" (UniqueName: \"kubernetes.io/projected/d03ea258-1672-476d-9357-dd4a98070676-kube-api-access-lf6gp\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810485 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810557 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-log-httpd\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810597 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-run-httpd\") pod \"ceilometer-0\" (UID: 
\"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.810711 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-config-data\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.820317 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912497 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912609 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-log-httpd\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912669 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-run-httpd\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912718 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-config-data\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912872 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912913 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-scripts\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.912979 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf6gp\" (UniqueName: \"kubernetes.io/projected/d03ea258-1672-476d-9357-dd4a98070676-kube-api-access-lf6gp\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.915072 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-run-httpd\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.917476 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-log-httpd\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.920580 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.921117 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-scripts\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.921912 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.923342 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-config-data\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:49 crc kubenswrapper[4716]: I1209 15:34:49.933590 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf6gp\" (UniqueName: \"kubernetes.io/projected/d03ea258-1672-476d-9357-dd4a98070676-kube-api-access-lf6gp\") pod \"ceilometer-0\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") " pod="openstack/ceilometer-0" Dec 09 15:34:50 crc kubenswrapper[4716]: I1209 15:34:50.087658 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:34:50 crc kubenswrapper[4716]: I1209 15:34:50.590071 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:50 crc kubenswrapper[4716]: I1209 15:34:50.700134 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerStarted","Data":"28b3d27fbf5c3633c8f9f155986b000b4abddcb41c62ee778e64c9fc49fec806"} Dec 09 15:34:51 crc kubenswrapper[4716]: I1209 15:34:51.227211 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4af083f7-0a3a-4e5a-90f9-2a7f86fec042" path="/var/lib/kubelet/pods/4af083f7-0a3a-4e5a-90f9-2a7f86fec042/volumes" Dec 09 15:34:51 crc kubenswrapper[4716]: I1209 15:34:51.714520 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerStarted","Data":"85ed23c16caf8d6d69fd07a96d1a387a0bf71f9dd51f86519fe60c4c3c37e51d"} Dec 09 15:34:52 crc kubenswrapper[4716]: I1209 15:34:52.768298 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerStarted","Data":"f700bf7ba6e46c0ae3d914980bdd64931d626e560b23f0fefe8b7369c3acf1a6"} Dec 09 15:34:53 crc kubenswrapper[4716]: I1209 15:34:53.782189 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerStarted","Data":"8c2d0ff3a6c677cfdcdcdbc926dcc71f9e374fa0b9d40a4503fb24fefc6f7b01"} Dec 09 15:34:54 crc kubenswrapper[4716]: I1209 15:34:54.119543 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:34:54 crc kubenswrapper[4716]: I1209 15:34:54.120124 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-log" containerID="cri-o://d6a42dcf537008f4f39a0f551b4a0d479fc86d2c5cf5f63a92c092513609ccf6" gracePeriod=30 Dec 09 15:34:54 crc kubenswrapper[4716]: I1209 15:34:54.120224 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-httpd" containerID="cri-o://e0b3bce6e4f911d56460e7e6c57e92cb0818375ba9c445e1cb2d33a6fcc8253c" gracePeriod=30 Dec 09 15:34:54 crc kubenswrapper[4716]: I1209 15:34:54.840866 4716 generic.go:334] "Generic (PLEG): container finished" podID="381de784-1e86-405c-84a0-169fd93d2ef2" containerID="d6a42dcf537008f4f39a0f551b4a0d479fc86d2c5cf5f63a92c092513609ccf6" exitCode=143 Dec 09 15:34:54 crc kubenswrapper[4716]: I1209 15:34:54.841172 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"381de784-1e86-405c-84a0-169fd93d2ef2","Type":"ContainerDied","Data":"d6a42dcf537008f4f39a0f551b4a0d479fc86d2c5cf5f63a92c092513609ccf6"} Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.516474 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.517098 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-log" 
containerID="cri-o://dff61c3808e480eb5344a9679d346371a6ed83e46e414d3b1288cb4d60a7b29a" gracePeriod=30 Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.517197 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-httpd" containerID="cri-o://f9b61364f0ef1deff6be397a1c18bbaf24bc9a0bbec192df0cc269de6d21535b" gracePeriod=30 Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.855437 4716 generic.go:334] "Generic (PLEG): container finished" podID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerID="dff61c3808e480eb5344a9679d346371a6ed83e46e414d3b1288cb4d60a7b29a" exitCode=143 Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.855519 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c0b9015f-858a-47f4-b619-a47e54c09d03","Type":"ContainerDied","Data":"dff61c3808e480eb5344a9679d346371a6ed83e46e414d3b1288cb4d60a7b29a"} Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.858680 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerStarted","Data":"db295666a97b2504e4d59f496499001d8869474b73649961b186327596edde5e"} Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.858820 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:34:55 crc kubenswrapper[4716]: I1209 15:34:55.915425 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.44356201 podStartE2EDuration="6.915400954s" podCreationTimestamp="2025-12-09 15:34:49 +0000 UTC" firstStartedPulling="2025-12-09 15:34:50.584643478 +0000 UTC m=+1577.739387466" lastFinishedPulling="2025-12-09 15:34:55.056482422 +0000 UTC m=+1582.211226410" observedRunningTime="2025-12-09 15:34:55.908986952 +0000 UTC m=+1583.063730940" watchObservedRunningTime="2025-12-09 15:34:55.915400954 +0000 UTC m=+1583.070144932" Dec 09 15:34:56 crc kubenswrapper[4716]: I1209 15:34:56.211438 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.891010 4716 generic.go:334] "Generic (PLEG): container finished" podID="381de784-1e86-405c-84a0-169fd93d2ef2" containerID="e0b3bce6e4f911d56460e7e6c57e92cb0818375ba9c445e1cb2d33a6fcc8253c" exitCode=0 Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.891092 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"381de784-1e86-405c-84a0-169fd93d2ef2","Type":"ContainerDied","Data":"e0b3bce6e4f911d56460e7e6c57e92cb0818375ba9c445e1cb2d33a6fcc8253c"} Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.891408 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"381de784-1e86-405c-84a0-169fd93d2ef2","Type":"ContainerDied","Data":"64706d0a23ca94b52a9cae99bcfe6bf65826545b4dd6b70b66eef90ff19414cf"} Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.891422 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64706d0a23ca94b52a9cae99bcfe6bf65826545b4dd6b70b66eef90ff19414cf" Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.891597 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-central-agent" containerID="cri-o://85ed23c16caf8d6d69fd07a96d1a387a0bf71f9dd51f86519fe60c4c3c37e51d" gracePeriod=30 Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.892178 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="proxy-httpd" containerID="cri-o://db295666a97b2504e4d59f496499001d8869474b73649961b186327596edde5e" gracePeriod=30 Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.892233 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="sg-core" containerID="cri-o://8c2d0ff3a6c677cfdcdcdbc926dcc71f9e374fa0b9d40a4503fb24fefc6f7b01" gracePeriod=30 Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.892264 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-notification-agent" containerID="cri-o://f700bf7ba6e46c0ae3d914980bdd64931d626e560b23f0fefe8b7369c3acf1a6" gracePeriod=30 Dec 09 15:34:57 crc kubenswrapper[4716]: I1209 15:34:57.965660 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034019 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-scripts\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034076 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-combined-ca-bundle\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034108 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-public-tls-certs\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034132 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-logs\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034175 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-httpd-run\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034230 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxrxc\" (UniqueName: \"kubernetes.io/projected/381de784-1e86-405c-84a0-169fd93d2ef2-kube-api-access-xxrxc\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc 
kubenswrapper[4716]: I1209 15:34:58.034257 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.034335 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-config-data\") pod \"381de784-1e86-405c-84a0-169fd93d2ef2\" (UID: \"381de784-1e86-405c-84a0-169fd93d2ef2\") " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.036338 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-logs" (OuterVolumeSpecName: "logs") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.046050 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.046797 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/381de784-1e86-405c-84a0-169fd93d2ef2-kube-api-access-xxrxc" (OuterVolumeSpecName: "kube-api-access-xxrxc") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "kube-api-access-xxrxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.054881 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-scripts" (OuterVolumeSpecName: "scripts") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.064823 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.125876 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.138125 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.138169 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.138184 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.138198 4716 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/381de784-1e86-405c-84a0-169fd93d2ef2-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.138209 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxrxc\" (UniqueName: \"kubernetes.io/projected/381de784-1e86-405c-84a0-169fd93d2ef2-kube-api-access-xxrxc\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.138240 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.161720 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.174457 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.239039 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-config-data" (OuterVolumeSpecName: "config-data") pod "381de784-1e86-405c-84a0-169fd93d2ef2" (UID: "381de784-1e86-405c-84a0-169fd93d2ef2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.240951 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.240990 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.241004 4716 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/381de784-1e86-405c-84a0-169fd93d2ef2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.909642 4716 generic.go:334] "Generic (PLEG): container finished" podID="d03ea258-1672-476d-9357-dd4a98070676" containerID="db295666a97b2504e4d59f496499001d8869474b73649961b186327596edde5e" exitCode=0 Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.910008 4716 generic.go:334] "Generic (PLEG): container finished" podID="d03ea258-1672-476d-9357-dd4a98070676" containerID="8c2d0ff3a6c677cfdcdcdbc926dcc71f9e374fa0b9d40a4503fb24fefc6f7b01" exitCode=2 Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.910021 4716 generic.go:334] "Generic (PLEG): container finished" podID="d03ea258-1672-476d-9357-dd4a98070676" containerID="f700bf7ba6e46c0ae3d914980bdd64931d626e560b23f0fefe8b7369c3acf1a6" exitCode=0 Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.910097 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.909685 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerDied","Data":"db295666a97b2504e4d59f496499001d8869474b73649961b186327596edde5e"} Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.910212 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerDied","Data":"8c2d0ff3a6c677cfdcdcdbc926dcc71f9e374fa0b9d40a4503fb24fefc6f7b01"} Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.910237 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerDied","Data":"f700bf7ba6e46c0ae3d914980bdd64931d626e560b23f0fefe8b7369c3acf1a6"} Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.959589 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:34:58 crc kubenswrapper[4716]: I1209 15:34:58.980980 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.016751 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:34:59 crc kubenswrapper[4716]: E1209 15:34:59.017405 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-httpd" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.017430 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-httpd" 
Dec 09 15:34:59 crc kubenswrapper[4716]: E1209 15:34:59.017451 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-log" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.017463 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-log" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.017730 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-log" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.017764 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" containerName="glance-httpd" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.019213 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.023546 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.024046 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.159390 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sjd8\" (UniqueName: \"kubernetes.io/projected/f3b8d20d-33fa-407b-9d95-17f9321fdf08-kube-api-access-4sjd8\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.159439 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.159661 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-scripts\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.159797 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.159826 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d20d-33fa-407b-9d95-17f9321fdf08-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.159906 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.160117 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d20d-33fa-407b-9d95-17f9321fdf08-logs\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.160248 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-config-data\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.228264 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="381de784-1e86-405c-84a0-169fd93d2ef2" path="/var/lib/kubelet/pods/381de784-1e86-405c-84a0-169fd93d2ef2/volumes" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.247983 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262036 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262123 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-scripts\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262185 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262213 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d20d-33fa-407b-9d95-17f9321fdf08-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262249 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262349 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d20d-33fa-407b-9d95-17f9321fdf08-logs\") 
pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262416 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-config-data\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.262590 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sjd8\" (UniqueName: \"kubernetes.io/projected/f3b8d20d-33fa-407b-9d95-17f9321fdf08-kube-api-access-4sjd8\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.263187 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.263388 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d20d-33fa-407b-9d95-17f9321fdf08-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.263398 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3b8d20d-33fa-407b-9d95-17f9321fdf08-logs\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.270108 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-scripts\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.270108 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-config-data\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.281212 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.286732 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8d20d-33fa-407b-9d95-17f9321fdf08-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " 
pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.289453 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sjd8\" (UniqueName: \"kubernetes.io/projected/f3b8d20d-33fa-407b-9d95-17f9321fdf08-kube-api-access-4sjd8\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.305688 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"f3b8d20d-33fa-407b-9d95-17f9321fdf08\") " pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.336766 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.925385 4716 generic.go:334] "Generic (PLEG): container finished" podID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerID="f9b61364f0ef1deff6be397a1c18bbaf24bc9a0bbec192df0cc269de6d21535b" exitCode=0 Dec 09 15:34:59 crc kubenswrapper[4716]: I1209 15:34:59.925494 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c0b9015f-858a-47f4-b619-a47e54c09d03","Type":"ContainerDied","Data":"f9b61364f0ef1deff6be397a1c18bbaf24bc9a0bbec192df0cc269de6d21535b"} Dec 09 15:35:00 crc kubenswrapper[4716]: I1209 15:35:00.197097 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 09 15:35:00 crc kubenswrapper[4716]: I1209 15:35:00.945111 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c0b9015f-858a-47f4-b619-a47e54c09d03","Type":"ContainerDied","Data":"9145934aa78d71cdc94b5cda9a8287a5e7c863d6195f4fd7d792c252ef090cfc"} Dec 09 15:35:00 crc kubenswrapper[4716]: I1209 15:35:00.945392 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9145934aa78d71cdc94b5cda9a8287a5e7c863d6195f4fd7d792c252ef090cfc" Dec 09 15:35:00 crc kubenswrapper[4716]: I1209 15:35:00.947957 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f3b8d20d-33fa-407b-9d95-17f9321fdf08","Type":"ContainerStarted","Data":"a80dca5b484bee7bd7f1361477780451857188dae2c406f588e2e611b28fa1b9"} Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.020007 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.114858 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.114915 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-combined-ca-bundle\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.114998 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzqqq\" (UniqueName: \"kubernetes.io/projected/c0b9015f-858a-47f4-b619-a47e54c09d03-kube-api-access-hzqqq\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.115027 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-httpd-run\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.115064 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-config-data\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.115084 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-internal-tls-certs\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.115240 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-logs\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.115334 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-scripts\") pod \"c0b9015f-858a-47f4-b619-a47e54c09d03\" (UID: \"c0b9015f-858a-47f4-b619-a47e54c09d03\") "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.115577 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.116304 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-logs" (OuterVolumeSpecName: "logs") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.116339 4716 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-httpd-run\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.126719 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0b9015f-858a-47f4-b619-a47e54c09d03-kube-api-access-hzqqq" (OuterVolumeSpecName: "kube-api-access-hzqqq") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "kube-api-access-hzqqq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.129144 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-scripts" (OuterVolumeSpecName: "scripts") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.132853 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.218918 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.219282 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" "
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.219302 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.219315 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzqqq\" (UniqueName: \"kubernetes.io/projected/c0b9015f-858a-47f4-b619-a47e54c09d03-kube-api-access-hzqqq\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.219324 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0b9015f-858a-47f4-b619-a47e54c09d03-logs\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.219333 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.224273 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.249311 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc"
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.251062 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-config-data" (OuterVolumeSpecName: "config-data") pod "c0b9015f-858a-47f4-b619-a47e54c09d03" (UID: "c0b9015f-858a-47f4-b619-a47e54c09d03"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.322066 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.322130 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.322147 4716 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0b9015f-858a-47f4-b619-a47e54c09d03-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.962271 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.962489 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f3b8d20d-33fa-407b-9d95-17f9321fdf08","Type":"ContainerStarted","Data":"f8141f8291f0164a11fe1705ec775318390bce20234bf6301258716b63e796b0"}
Dec 09 15:35:01 crc kubenswrapper[4716]: I1209 15:35:01.963864 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f3b8d20d-33fa-407b-9d95-17f9321fdf08","Type":"ContainerStarted","Data":"ca238c06358c40f22069d27238e25d5afbe4aab5b69d6bf961f070518048d84c"}
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.029207 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.029183521 podStartE2EDuration="4.029183521s" podCreationTimestamp="2025-12-09 15:34:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:02.024158499 +0000 UTC m=+1589.178902487" watchObservedRunningTime="2025-12-09 15:35:02.029183521 +0000 UTC m=+1589.183927509"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.062813 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.083784 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.098422 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 15:35:02 crc kubenswrapper[4716]: E1209 15:35:02.098994 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-httpd"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.099013 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-httpd"
Dec 09 15:35:02 crc kubenswrapper[4716]: E1209 15:35:02.099048 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-log"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.099056 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-log"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.099321 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-log"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.099349 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" containerName="glance-httpd"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.100732 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.106468 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.107231 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.117553 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.244581 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.244698 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj9c2\" (UniqueName: \"kubernetes.io/projected/23f21e51-c98a-4198-9e0f-a4e7494c10e8-kube-api-access-sj9c2\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.244723 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23f21e51-c98a-4198-9e0f-a4e7494c10e8-logs\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.244811 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.244868 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/23f21e51-c98a-4198-9e0f-a4e7494c10e8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.244911 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.245026 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.245107 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.346895 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.346977 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj9c2\" (UniqueName: \"kubernetes.io/projected/23f21e51-c98a-4198-9e0f-a4e7494c10e8-kube-api-access-sj9c2\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.346997 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23f21e51-c98a-4198-9e0f-a4e7494c10e8-logs\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.347027 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.347085 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/23f21e51-c98a-4198-9e0f-a4e7494c10e8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.347119 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.347221 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.347288 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.348564 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.349594 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/23f21e51-c98a-4198-9e0f-a4e7494c10e8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.349668 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/23f21e51-c98a-4198-9e0f-a4e7494c10e8-logs\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.377723 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.380269 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.380513 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.380863 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23f21e51-c98a-4198-9e0f-a4e7494c10e8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.394525 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj9c2\" (UniqueName: \"kubernetes.io/projected/23f21e51-c98a-4198-9e0f-a4e7494c10e8-kube-api-access-sj9c2\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.410476 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"23f21e51-c98a-4198-9e0f-a4e7494c10e8\") " pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:02 crc kubenswrapper[4716]: I1209 15:35:02.423493 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:03 crc kubenswrapper[4716]: I1209 15:35:03.287845 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0b9015f-858a-47f4-b619-a47e54c09d03" path="/var/lib/kubelet/pods/c0b9015f-858a-47f4-b619-a47e54c09d03/volumes"
Dec 09 15:35:03 crc kubenswrapper[4716]: W1209 15:35:03.300655 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23f21e51_c98a_4198_9e0f_a4e7494c10e8.slice/crio-7db76618665f67f999818d05f3e0b600f55e0279970e2e08821fb46e3bc2cf2b WatchSource:0}: Error finding container 7db76618665f67f999818d05f3e0b600f55e0279970e2e08821fb46e3bc2cf2b: Status 404 returned error can't find the container with id 7db76618665f67f999818d05f3e0b600f55e0279970e2e08821fb46e3bc2cf2b
Dec 09 15:35:03 crc kubenswrapper[4716]: I1209 15:35:03.301089 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Dec 09 15:35:03 crc kubenswrapper[4716]: I1209 15:35:03.558742 4716 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod34faeba3-0fa7-439c-888d-98bc7ec01369"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod34faeba3-0fa7-439c-888d-98bc7ec01369] : Timed out while waiting for systemd to remove kubepods-besteffort-pod34faeba3_0fa7_439c_888d_98bc7ec01369.slice"
Dec 09 15:35:03 crc kubenswrapper[4716]: I1209 15:35:03.995563 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"23f21e51-c98a-4198-9e0f-a4e7494c10e8","Type":"ContainerStarted","Data":"7db76618665f67f999818d05f3e0b600f55e0279970e2e08821fb46e3bc2cf2b"}
Dec 09 15:35:05 crc kubenswrapper[4716]: I1209 15:35:05.011168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"23f21e51-c98a-4198-9e0f-a4e7494c10e8","Type":"ContainerStarted","Data":"9fe806ef3ecfa0e2baf22a0422243e7056a76143c60b743757ba954c364d88bd"}
Dec 09 15:35:05 crc kubenswrapper[4716]: I1209 15:35:05.011790 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"23f21e51-c98a-4198-9e0f-a4e7494c10e8","Type":"ContainerStarted","Data":"2fa2367b48f65075ff59df78632b248396603134cc5603765054ed16282dacc1"}
Dec 09 15:35:05 crc kubenswrapper[4716]: I1209 15:35:05.040576 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.0405540110000002 podStartE2EDuration="3.040554011s" podCreationTimestamp="2025-12-09 15:35:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:05.039064059 +0000 UTC m=+1592.193808067" watchObservedRunningTime="2025-12-09 15:35:05.040554011 +0000 UTC m=+1592.195297999"
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.038838 4716 generic.go:334] "Generic (PLEG): container finished" podID="d03ea258-1672-476d-9357-dd4a98070676" containerID="85ed23c16caf8d6d69fd07a96d1a387a0bf71f9dd51f86519fe60c4c3c37e51d" exitCode=0
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.039344 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerDied","Data":"85ed23c16caf8d6d69fd07a96d1a387a0bf71f9dd51f86519fe60c4c3c37e51d"}
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.666670 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.775944 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-run-httpd\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.776270 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-config-data\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.776613 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-log-httpd\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.776614 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.777000 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.777084 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-combined-ca-bundle\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.777239 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-scripts\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.777746 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-sg-core-conf-yaml\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.777818 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf6gp\" (UniqueName: \"kubernetes.io/projected/d03ea258-1672-476d-9357-dd4a98070676-kube-api-access-lf6gp\") pod \"d03ea258-1672-476d-9357-dd4a98070676\" (UID: \"d03ea258-1672-476d-9357-dd4a98070676\") "
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.778674 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.778694 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d03ea258-1672-476d-9357-dd4a98070676-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.786144 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-scripts" (OuterVolumeSpecName: "scripts") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.789524 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d03ea258-1672-476d-9357-dd4a98070676-kube-api-access-lf6gp" (OuterVolumeSpecName: "kube-api-access-lf6gp") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "kube-api-access-lf6gp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.820033 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.881675 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.881711 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.881723 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf6gp\" (UniqueName: \"kubernetes.io/projected/d03ea258-1672-476d-9357-dd4a98070676-kube-api-access-lf6gp\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.900677 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.919877 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-config-data" (OuterVolumeSpecName: "config-data") pod "d03ea258-1672-476d-9357-dd4a98070676" (UID: "d03ea258-1672-476d-9357-dd4a98070676"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.984314 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:07 crc kubenswrapper[4716]: I1209 15:35:07.984364 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d03ea258-1672-476d-9357-dd4a98070676-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.055337 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d03ea258-1672-476d-9357-dd4a98070676","Type":"ContainerDied","Data":"28b3d27fbf5c3633c8f9f155986b000b4abddcb41c62ee778e64c9fc49fec806"}
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.055400 4716 scope.go:117] "RemoveContainer" containerID="db295666a97b2504e4d59f496499001d8869474b73649961b186327596edde5e"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.055585 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.058113 4716 generic.go:334] "Generic (PLEG): container finished" podID="361ee6cb-d557-4f46-8cf8-da9c121604e2" containerID="cd06bf863ee800bf49b74513ca377f9acf689ffa0e5dfe55a5264f68c1fe45ac" exitCode=0
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.058183 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" event={"ID":"361ee6cb-d557-4f46-8cf8-da9c121604e2","Type":"ContainerDied","Data":"cd06bf863ee800bf49b74513ca377f9acf689ffa0e5dfe55a5264f68c1fe45ac"}
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.080974 4716 scope.go:117] "RemoveContainer" containerID="8c2d0ff3a6c677cfdcdcdbc926dcc71f9e374fa0b9d40a4503fb24fefc6f7b01"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.128410 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.143772 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.147860 4716 scope.go:117] "RemoveContainer" containerID="f700bf7ba6e46c0ae3d914980bdd64931d626e560b23f0fefe8b7369c3acf1a6"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.171077 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 09 15:35:08 crc kubenswrapper[4716]: E1209 15:35:08.171829 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-notification-agent"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.171849 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-notification-agent"
Dec 09 15:35:08 crc kubenswrapper[4716]: E1209 15:35:08.171862 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="sg-core"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.171870 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="sg-core"
Dec 09 15:35:08 crc kubenswrapper[4716]: E1209 15:35:08.171890 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-central-agent"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.171898 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-central-agent"
Dec 09 15:35:08 crc kubenswrapper[4716]: E1209 15:35:08.171910 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="proxy-httpd"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.171918 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="proxy-httpd"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.172282 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-central-agent"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.172313 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="sg-core"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.172330 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="ceilometer-notification-agent"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.172353 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d03ea258-1672-476d-9357-dd4a98070676" containerName="proxy-httpd"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.175140 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.180645 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.180879 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.194251 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.204271 4716 scope.go:117] "RemoveContainer" containerID="85ed23c16caf8d6d69fd07a96d1a387a0bf71f9dd51f86519fe60c4c3c37e51d"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291287 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-log-httpd\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291358 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291410 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-run-httpd\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291467 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sptnc\" (UniqueName: \"kubernetes.io/projected/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-kube-api-access-sptnc\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291557 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-config-data\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291599 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-scripts\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.291649 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393335 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-config-data\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393407 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-scripts\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393442 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393559 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-log-httpd\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393585 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393614 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-run-httpd\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.393676 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sptnc\" (UniqueName: \"kubernetes.io/projected/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-kube-api-access-sptnc\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.394545 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-log-httpd\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.395353 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-run-httpd\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.401253 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.409389 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.413727 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-scripts\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.414932 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-config-data\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.419389 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sptnc\" (UniqueName: \"kubernetes.io/projected/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-kube-api-access-sptnc\") pod \"ceilometer-0\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " pod="openstack/ceilometer-0"
Dec 09 15:35:08 crc kubenswrapper[4716]: I1209 15:35:08.510695 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.118157 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 09 15:35:09 crc kubenswrapper[4716]: W1209 15:35:09.148756 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45a3fab3_ed88_43ff_96d9_fa02a46b23ec.slice/crio-56a0c5afc69cb6aa2b674b18d5b4409beee80c4a03f15e7d02ad7c319db3ee0f WatchSource:0}: Error finding container 56a0c5afc69cb6aa2b674b18d5b4409beee80c4a03f15e7d02ad7c319db3ee0f: Status 404 returned error can't find the container with id 56a0c5afc69cb6aa2b674b18d5b4409beee80c4a03f15e7d02ad7c319db3ee0f
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.245091 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d03ea258-1672-476d-9357-dd4a98070676" path="/var/lib/kubelet/pods/d03ea258-1672-476d-9357-dd4a98070676/volumes"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.337779 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.339877 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.392607 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.399410 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.537780 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dm4wb"
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.629081 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-config-data\") pod \"361ee6cb-d557-4f46-8cf8-da9c121604e2\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") "
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.629357 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-scripts\") pod \"361ee6cb-d557-4f46-8cf8-da9c121604e2\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") "
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.629523 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-combined-ca-bundle\") pod \"361ee6cb-d557-4f46-8cf8-da9c121604e2\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") "
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.629582 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn75s\" (UniqueName: \"kubernetes.io/projected/361ee6cb-d557-4f46-8cf8-da9c121604e2-kube-api-access-jn75s\") pod \"361ee6cb-d557-4f46-8cf8-da9c121604e2\" (UID: \"361ee6cb-d557-4f46-8cf8-da9c121604e2\") "
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.638209 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/361ee6cb-d557-4f46-8cf8-da9c121604e2-kube-api-access-jn75s" (OuterVolumeSpecName: "kube-api-access-jn75s") pod "361ee6cb-d557-4f46-8cf8-da9c121604e2" (UID: "361ee6cb-d557-4f46-8cf8-da9c121604e2"). InnerVolumeSpecName "kube-api-access-jn75s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.638462 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-scripts" (OuterVolumeSpecName: "scripts") pod "361ee6cb-d557-4f46-8cf8-da9c121604e2" (UID: "361ee6cb-d557-4f46-8cf8-da9c121604e2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.671196 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "361ee6cb-d557-4f46-8cf8-da9c121604e2" (UID: "361ee6cb-d557-4f46-8cf8-da9c121604e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.673431 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-config-data" (OuterVolumeSpecName: "config-data") pod "361ee6cb-d557-4f46-8cf8-da9c121604e2" (UID: "361ee6cb-d557-4f46-8cf8-da9c121604e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.733036 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-scripts\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.733309 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.733418 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn75s\" (UniqueName: \"kubernetes.io/projected/361ee6cb-d557-4f46-8cf8-da9c121604e2-kube-api-access-jn75s\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:09 crc kubenswrapper[4716]: I1209 15:35:09.733481 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361ee6cb-d557-4f46-8cf8-da9c121604e2-config-data\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.088688 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerStarted","Data":"698cdb3b2d1a4cf414fe92f2da9386ee981d7e7931c7deba63d5968dae4245c1"}
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.089018 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerStarted","Data":"56a0c5afc69cb6aa2b674b18d5b4409beee80c4a03f15e7d02ad7c319db3ee0f"}
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.092847 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-dm4wb" event={"ID":"361ee6cb-d557-4f46-8cf8-da9c121604e2","Type":"ContainerDied","Data":"0f9704e99c4c412f73d36398557de0d4dcc7217eaa73da6d815852d27a5b8575"}
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.092905 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f9704e99c4c412f73d36398557de0d4dcc7217eaa73da6d815852d27a5b8575"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.092942 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.092961 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.092968 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-dm4wb"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.216056 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Dec 09 15:35:10 crc kubenswrapper[4716]: E1209 15:35:10.216907 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="361ee6cb-d557-4f46-8cf8-da9c121604e2" containerName="nova-cell0-conductor-db-sync"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.216929 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="361ee6cb-d557-4f46-8cf8-da9c121604e2" containerName="nova-cell0-conductor-db-sync"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.217219 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="361ee6cb-d557-4f46-8cf8-da9c121604e2" containerName="nova-cell0-conductor-db-sync"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.218189 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.222463 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-nnrk9"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.223735 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.240731 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.349119 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.349214 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl5bm\" (UniqueName: \"kubernetes.io/projected/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-kube-api-access-sl5bm\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.349338 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.452414 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.453062 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl5bm\" (UniqueName: \"kubernetes.io/projected/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-kube-api-access-sl5bm\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.453419 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.459485 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.459846 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.476520 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl5bm\" (UniqueName: \"kubernetes.io/projected/06727d8d-6fe6-4146-87af-7cb45ea7e1e1-kube-api-access-sl5bm\") pod \"nova-cell0-conductor-0\" (UID: \"06727d8d-6fe6-4146-87af-7cb45ea7e1e1\") " pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:10 crc kubenswrapper[4716]: I1209 15:35:10.537729 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:11 crc kubenswrapper[4716]: I1209 15:35:11.118726 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerStarted","Data":"05e191539e5635a752a6bb5bedd814c7143cc4b69f1b2f4ff4623bd45d4ab798"}
Dec 09 15:35:11 crc kubenswrapper[4716]: I1209 15:35:11.147711 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.137565 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerStarted","Data":"4100ab696b251835939e43d78d26fb8262aefd963c606ff714e2c8c8aa07834a"}
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.143275 4716 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.143304 4716 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.143240 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"06727d8d-6fe6-4146-87af-7cb45ea7e1e1","Type":"ContainerStarted","Data":"c491869538d04b2bd947beabcccd44b97d688b5a56106ffce6c98b6cca17aa1a"}
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.143595 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.143637 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"06727d8d-6fe6-4146-87af-7cb45ea7e1e1","Type":"ContainerStarted","Data":"7bba059ef120258051f26b6ed1a603f7d2ff629c0c203f0fad00bfa509dfd0bb"}
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.176117 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.176092314 podStartE2EDuration="2.176092314s" podCreationTimestamp="2025-12-09 15:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:12.165395301 +0000 UTC m=+1599.320139289" watchObservedRunningTime="2025-12-09 15:35:12.176092314 +0000 UTC m=+1599.330836302"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.313863 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.327304 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.424790 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.424838 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.560581 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.609412 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.737940 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-rrjbc"]
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.739467 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.779088 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-rrjbc"]
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.860439 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-a657-account-create-update-fgzll"]
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.864084 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.869376 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.915234 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-a657-account-create-update-fgzll"]
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.936455 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t295v\" (UniqueName: \"kubernetes.io/projected/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-kube-api-access-t295v\") pod \"aodh-db-create-rrjbc\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:12 crc kubenswrapper[4716]: I1209 15:35:12.936924 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-operator-scripts\") pod \"aodh-db-create-rrjbc\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.038612 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-operator-scripts\") pod \"aodh-db-create-rrjbc\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.038791 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m92nq\" (UniqueName: \"kubernetes.io/projected/19d87406-0357-4685-9062-4c740b1e0346-kube-api-access-m92nq\") pod \"aodh-a657-account-create-update-fgzll\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.038822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t295v\" (UniqueName: \"kubernetes.io/projected/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-kube-api-access-t295v\") pod \"aodh-db-create-rrjbc\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.038853 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d87406-0357-4685-9062-4c740b1e0346-operator-scripts\") pod \"aodh-a657-account-create-update-fgzll\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.039317 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-operator-scripts\") pod \"aodh-db-create-rrjbc\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.062302 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t295v\" (UniqueName: \"kubernetes.io/projected/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-kube-api-access-t295v\") pod \"aodh-db-create-rrjbc\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " pod="openstack/aodh-db-create-rrjbc"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.141343 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m92nq\" (UniqueName: \"kubernetes.io/projected/19d87406-0357-4685-9062-4c740b1e0346-kube-api-access-m92nq\") pod \"aodh-a657-account-create-update-fgzll\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.141465 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d87406-0357-4685-9062-4c740b1e0346-operator-scripts\") pod \"aodh-a657-account-create-update-fgzll\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.142563 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d87406-0357-4685-9062-4c740b1e0346-operator-scripts\") pod \"aodh-a657-account-create-update-fgzll\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.155659 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.155711 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.168174 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m92nq\" (UniqueName: \"kubernetes.io/projected/19d87406-0357-4685-9062-4c740b1e0346-kube-api-access-m92nq\") pod \"aodh-a657-account-create-update-fgzll\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.193470 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-a657-account-create-update-fgzll"
Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.364097 4716 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/aodh-db-create-rrjbc" Dec 09 15:35:13 crc kubenswrapper[4716]: I1209 15:35:13.795907 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-a657-account-create-update-fgzll"] Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.006906 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-rrjbc"] Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.194047 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-a657-account-create-update-fgzll" event={"ID":"19d87406-0357-4685-9062-4c740b1e0346","Type":"ContainerStarted","Data":"cfcb48d717c055735f3bb3c3703dbbe7893fd20f3b0142619791442b77f53f52"} Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.194402 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-a657-account-create-update-fgzll" event={"ID":"19d87406-0357-4685-9062-4c740b1e0346","Type":"ContainerStarted","Data":"b4728dcce48fc050c933a87dcaa1b4777e5787a94339cc81064ba07bbd4b31cd"} Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.198547 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-rrjbc" event={"ID":"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa","Type":"ContainerStarted","Data":"bfa34041787703de2cf43a9d739beedd1a4550f7088cb6ea15628d9d04485a11"} Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.209656 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerStarted","Data":"3e929f1764ee4fd53fe79951d3e1537b5c693234d5ae42832246789f2d70c3c0"} Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.209838 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.225262 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-a657-account-create-update-fgzll" podStartSLOduration=2.225238835 podStartE2EDuration="2.225238835s" podCreationTimestamp="2025-12-09 15:35:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:14.219213654 +0000 UTC m=+1601.373957642" watchObservedRunningTime="2025-12-09 15:35:14.225238835 +0000 UTC m=+1601.379982823" Dec 09 15:35:14 crc kubenswrapper[4716]: I1209 15:35:14.257986 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.516329205 podStartE2EDuration="6.257957742s" podCreationTimestamp="2025-12-09 15:35:08 +0000 UTC" firstStartedPulling="2025-12-09 15:35:09.161968207 +0000 UTC m=+1596.316712195" lastFinishedPulling="2025-12-09 15:35:12.903596744 +0000 UTC m=+1600.058340732" observedRunningTime="2025-12-09 15:35:14.247739142 +0000 UTC m=+1601.402483140" watchObservedRunningTime="2025-12-09 15:35:14.257957742 +0000 UTC m=+1601.412701730" Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 15:35:15.224187 4716 generic.go:334] "Generic (PLEG): container finished" podID="94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" containerID="535a83fbc937decb13fd847adb3622678ff80d559b2a5532df02a057b7f164ad" exitCode=0 Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 15:35:15.227490 4716 generic.go:334] "Generic (PLEG): container finished" podID="19d87406-0357-4685-9062-4c740b1e0346" containerID="cfcb48d717c055735f3bb3c3703dbbe7893fd20f3b0142619791442b77f53f52" exitCode=0 Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 
15:35:15.229539 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-rrjbc" event={"ID":"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa","Type":"ContainerDied","Data":"535a83fbc937decb13fd847adb3622678ff80d559b2a5532df02a057b7f164ad"} Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 15:35:15.229587 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-a657-account-create-update-fgzll" event={"ID":"19d87406-0357-4685-9062-4c740b1e0346","Type":"ContainerDied","Data":"cfcb48d717c055735f3bb3c3703dbbe7893fd20f3b0142619791442b77f53f52"} Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 15:35:15.686458 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 15:35:15.686953 4716 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 09 15:35:15 crc kubenswrapper[4716]: I1209 15:35:15.692088 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.849521 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-a657-account-create-update-fgzll" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.858894 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-rrjbc" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.922977 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m92nq\" (UniqueName: \"kubernetes.io/projected/19d87406-0357-4685-9062-4c740b1e0346-kube-api-access-m92nq\") pod \"19d87406-0357-4685-9062-4c740b1e0346\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.923038 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d87406-0357-4685-9062-4c740b1e0346-operator-scripts\") pod \"19d87406-0357-4685-9062-4c740b1e0346\" (UID: \"19d87406-0357-4685-9062-4c740b1e0346\") " Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.923086 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-operator-scripts\") pod \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.923114 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t295v\" (UniqueName: \"kubernetes.io/projected/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-kube-api-access-t295v\") pod \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\" (UID: \"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa\") " Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.923699 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19d87406-0357-4685-9062-4c740b1e0346-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19d87406-0357-4685-9062-4c740b1e0346" (UID: "19d87406-0357-4685-9062-4c740b1e0346"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.923822 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" (UID: "94a3f2ac-e9a9-4974-ae3d-20fded31a2fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.924307 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d87406-0357-4685-9062-4c740b1e0346-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.924329 4716 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.930531 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19d87406-0357-4685-9062-4c740b1e0346-kube-api-access-m92nq" (OuterVolumeSpecName: "kube-api-access-m92nq") pod "19d87406-0357-4685-9062-4c740b1e0346" (UID: "19d87406-0357-4685-9062-4c740b1e0346"). InnerVolumeSpecName "kube-api-access-m92nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:16 crc kubenswrapper[4716]: I1209 15:35:16.936007 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-kube-api-access-t295v" (OuterVolumeSpecName: "kube-api-access-t295v") pod "94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" (UID: "94a3f2ac-e9a9-4974-ae3d-20fded31a2fa"). InnerVolumeSpecName "kube-api-access-t295v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.026215 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m92nq\" (UniqueName: \"kubernetes.io/projected/19d87406-0357-4685-9062-4c740b1e0346-kube-api-access-m92nq\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.026545 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t295v\" (UniqueName: \"kubernetes.io/projected/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa-kube-api-access-t295v\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.254989 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-a657-account-create-update-fgzll" Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.255958 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-a657-account-create-update-fgzll" event={"ID":"19d87406-0357-4685-9062-4c740b1e0346","Type":"ContainerDied","Data":"b4728dcce48fc050c933a87dcaa1b4777e5787a94339cc81064ba07bbd4b31cd"} Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.256022 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4728dcce48fc050c933a87dcaa1b4777e5787a94339cc81064ba07bbd4b31cd" Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.257872 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-rrjbc" event={"ID":"94a3f2ac-e9a9-4974-ae3d-20fded31a2fa","Type":"ContainerDied","Data":"bfa34041787703de2cf43a9d739beedd1a4550f7088cb6ea15628d9d04485a11"} Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.257911 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfa34041787703de2cf43a9d739beedd1a4550f7088cb6ea15628d9d04485a11" Dec 09 15:35:17 crc kubenswrapper[4716]: I1209 15:35:17.257954 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-rrjbc" Dec 09 15:35:20 crc kubenswrapper[4716]: I1209 15:35:20.578433 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.086201 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-2zp9h"] Dec 09 15:35:21 crc kubenswrapper[4716]: E1209 15:35:21.087200 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d87406-0357-4685-9062-4c740b1e0346" containerName="mariadb-account-create-update" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.087225 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d87406-0357-4685-9062-4c740b1e0346" containerName="mariadb-account-create-update" Dec 09 15:35:21 crc kubenswrapper[4716]: E1209 15:35:21.087268 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" containerName="mariadb-database-create" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.087278 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" containerName="mariadb-database-create" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.087594 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="19d87406-0357-4685-9062-4c740b1e0346" containerName="mariadb-account-create-update" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.087633 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" containerName="mariadb-database-create" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.088617 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.093559 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.093690 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.120631 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znjlh\" (UniqueName: \"kubernetes.io/projected/2a66dc81-048f-4d25-b04f-16e8063d92b7-kube-api-access-znjlh\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.121104 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-config-data\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.121182 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-scripts\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.121276 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.127887 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2zp9h"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.224860 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-scripts\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.225053 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.225253 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znjlh\" (UniqueName: \"kubernetes.io/projected/2a66dc81-048f-4d25-b04f-16e8063d92b7-kube-api-access-znjlh\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.225294 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-config-data\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.234924 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-config-data\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.246781 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-scripts\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.247415 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.318577 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.320514 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znjlh\" (UniqueName: \"kubernetes.io/projected/2a66dc81-048f-4d25-b04f-16e8063d92b7-kube-api-access-znjlh\") pod \"nova-cell0-cell-mapping-2zp9h\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.320740 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.338438 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.341130 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.363188 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.363410 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.370066 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.431136 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.446259 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.446818 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65454596-9d51-4fca-b5eb-c326dc4b3bb6-logs\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.446873 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.446903 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-config-data\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.446995 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.447161 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgk88\" (UniqueName: \"kubernetes.io/projected/c0b4428b-bbd0-4a02-8823-47813ec73b24-kube-api-access-cgk88\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.447257 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skdc7\" (UniqueName: \"kubernetes.io/projected/65454596-9d51-4fca-b5eb-c326dc4b3bb6-kube-api-access-skdc7\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.488807 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.499479 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.511852 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.522182 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.536828 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.549932 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.550239 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.551157 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-logs\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.551276 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-config-data\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.551469 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65454596-9d51-4fca-b5eb-c326dc4b3bb6-logs\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.551588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.551719 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-config-data\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.551841 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.552049 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cgk88\" (UniqueName: \"kubernetes.io/projected/c0b4428b-bbd0-4a02-8823-47813ec73b24-kube-api-access-cgk88\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.552173 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfbt2\" (UniqueName: \"kubernetes.io/projected/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-kube-api-access-mfbt2\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.552290 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skdc7\" (UniqueName: \"kubernetes.io/projected/65454596-9d51-4fca-b5eb-c326dc4b3bb6-kube-api-access-skdc7\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.553243 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65454596-9d51-4fca-b5eb-c326dc4b3bb6-logs\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.564755 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.565352 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.599267 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.603879 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-config-data\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.612660 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skdc7\" (UniqueName: \"kubernetes.io/projected/65454596-9d51-4fca-b5eb-c326dc4b3bb6-kube-api-access-skdc7\") pod \"nova-api-0\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") " pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.645276 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.647024 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.652066 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.656254 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-logs\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.656532 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-config-data\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.657564 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-logs\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.657555 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfbt2\" (UniqueName: \"kubernetes.io/projected/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-kube-api-access-mfbt2\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.658029 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.670510 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgk88\" (UniqueName: \"kubernetes.io/projected/c0b4428b-bbd0-4a02-8823-47813ec73b24-kube-api-access-cgk88\") pod \"nova-cell1-novncproxy-0\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.671657 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-config-data\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.697025 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.701562 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.711281 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-6ccd6"] Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.715244 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.719268 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.753894 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfbt2\" (UniqueName: \"kubernetes.io/projected/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-kube-api-access-mfbt2\") pod \"nova-metadata-0\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " pod="openstack/nova-metadata-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.764582 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-config\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.768656 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q86n7\" (UniqueName: \"kubernetes.io/projected/e84c448d-0f33-44b9-a68d-5451111935f2-kube-api-access-q86n7\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.769133 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.769268 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6zxw\" (UniqueName: \"kubernetes.io/projected/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-kube-api-access-k6zxw\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.773393 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.774532 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-svc\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.775520 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-config-data\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.775726 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.775771 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:21 crc kubenswrapper[4716]: I1209 15:35:21.775822 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.826552 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-6ccd6"] Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.947511 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-config\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.947588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q86n7\" (UniqueName: \"kubernetes.io/projected/e84c448d-0f33-44b9-a68d-5451111935f2-kube-api-access-q86n7\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.947873 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.947948 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6zxw\" (UniqueName: 
\"kubernetes.io/projected/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-kube-api-access-k6zxw\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.947987 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-svc\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.948099 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-config-data\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.948182 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.948207 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.948233 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.950168 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-config\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.953480 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-sb\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.954701 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-swift-storage-0\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.954924 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-svc\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " 
pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.955731 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-nb\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.959882 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-config-data\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.997538 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:21.998652 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.009198 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6zxw\" (UniqueName: \"kubernetes.io/projected/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-kube-api-access-k6zxw\") pod \"nova-scheduler-0\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.026366 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q86n7\" (UniqueName: \"kubernetes.io/projected/e84c448d-0f33-44b9-a68d-5451111935f2-kube-api-access-q86n7\") pod \"dnsmasq-dns-9b86998b5-6ccd6\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.041418 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.069878 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.968708 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9gtbn"] Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.971679 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.975077 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.975159 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 09 15:35:22 crc kubenswrapper[4716]: I1209 15:35:22.994014 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9gtbn"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.083183 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-config-data\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.083521 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq82m\" (UniqueName: \"kubernetes.io/projected/8f95389e-c579-4294-b7a5-f22c93e45d85-kube-api-access-gq82m\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.083677 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.083743 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-scripts\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.167249 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-lzlrv"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.169886 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.177957 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.178207 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.178302 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.178307 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pmtlh" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.183226 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-lzlrv"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.202944 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq82m\" (UniqueName: \"kubernetes.io/projected/8f95389e-c579-4294-b7a5-f22c93e45d85-kube-api-access-gq82m\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.203693 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.203738 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-scripts\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.205223 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-config-data\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.211191 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-config-data\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.211199 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.211379 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-scripts\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: 
\"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.230934 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq82m\" (UniqueName: \"kubernetes.io/projected/8f95389e-c579-4294-b7a5-f22c93e45d85-kube-api-access-gq82m\") pod \"nova-cell1-conductor-db-sync-9gtbn\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.302412 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.309795 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-config-data\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.309936 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-scripts\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.310010 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-combined-ca-bundle\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.310135 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rdtq\" (UniqueName: \"kubernetes.io/projected/560906c8-b759-4faa-9a29-86f3c8f92cdb-kube-api-access-9rdtq\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.416153 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rdtq\" (UniqueName: \"kubernetes.io/projected/560906c8-b759-4faa-9a29-86f3c8f92cdb-kube-api-access-9rdtq\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.416588 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-config-data\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.416720 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-scripts\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.416789 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-combined-ca-bundle\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.427166 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-scripts\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.428163 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-combined-ca-bundle\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.439015 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rdtq\" (UniqueName: \"kubernetes.io/projected/560906c8-b759-4faa-9a29-86f3c8f92cdb-kube-api-access-9rdtq\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.439909 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-config-data\") pod \"aodh-db-sync-lzlrv\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.497990 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.614487 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2zp9h"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.660962 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.661384 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:23 crc kubenswrapper[4716]: W1209 15:35:23.687046 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65454596_9d51_4fca_b5eb_c326dc4b3bb6.slice/crio-91b8296ed79479be59467f010f1961c3fff361de6cfb44e28df677b5e115775e WatchSource:0}: Error finding container 91b8296ed79479be59467f010f1961c3fff361de6cfb44e28df677b5e115775e: Status 404 returned error can't find the container with id 91b8296ed79479be59467f010f1961c3fff361de6cfb44e28df677b5e115775e Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.695347 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.721789 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.739801 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-6ccd6"] Dec 09 15:35:23 crc kubenswrapper[4716]: I1209 15:35:23.762864 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:35:23 crc kubenswrapper[4716]: W1209 15:35:23.781773 4716 manager.go:1169] Failed to process watch 
event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode84c448d_0f33_44b9_a68d_5451111935f2.slice/crio-d4f26d2a862fe2de1137af69eb5027dc87fd5aaf282efd55c29e1a4d3638ceaa WatchSource:0}: Error finding container d4f26d2a862fe2de1137af69eb5027dc87fd5aaf282efd55c29e1a4d3638ceaa: Status 404 returned error can't find the container with id d4f26d2a862fe2de1137af69eb5027dc87fd5aaf282efd55c29e1a4d3638ceaa Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.049059 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-9gtbn"] Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.198276 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-lzlrv"] Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.411387 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lzlrv" event={"ID":"560906c8-b759-4faa-9a29-86f3c8f92cdb","Type":"ContainerStarted","Data":"83ca10fb0fe9517cc5238120ef112d452e0ad10d1f5edd25dedc7fa057c39159"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.421079 4716 generic.go:334] "Generic (PLEG): container finished" podID="e84c448d-0f33-44b9-a68d-5451111935f2" containerID="8ef1e862b58f4300d19931af74b351b6ad8cacfcde69e7d88ade78c765aaea41" exitCode=0 Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.422028 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" event={"ID":"e84c448d-0f33-44b9-a68d-5451111935f2","Type":"ContainerDied","Data":"8ef1e862b58f4300d19931af74b351b6ad8cacfcde69e7d88ade78c765aaea41"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.422104 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" event={"ID":"e84c448d-0f33-44b9-a68d-5451111935f2","Type":"ContainerStarted","Data":"d4f26d2a862fe2de1137af69eb5027dc87fd5aaf282efd55c29e1a4d3638ceaa"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.426381 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8","Type":"ContainerStarted","Data":"a513fcc71b74e0e8012272146586bf38b19811da00b1eebfd1821e9d02180e18"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.445864 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a7089c58-3ddf-493a-a521-6cbb1c8f4cba","Type":"ContainerStarted","Data":"3705b8b75e5611b4c5c59bb7ff1cb755e95415dcce88f528068363c26e449eaa"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.497938 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65454596-9d51-4fca-b5eb-c326dc4b3bb6","Type":"ContainerStarted","Data":"91b8296ed79479be59467f010f1961c3fff361de6cfb44e28df677b5e115775e"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.508039 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c0b4428b-bbd0-4a02-8823-47813ec73b24","Type":"ContainerStarted","Data":"7afe654877063a2699c944467fa9b60b18e135c509a7c1c08119530306202b21"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.534396 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" event={"ID":"8f95389e-c579-4294-b7a5-f22c93e45d85","Type":"ContainerStarted","Data":"297b248ce8a37da2b619eddbacd46e33937aff3069e0b86332f34f04a75c9a3f"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.534714 4716 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" event={"ID":"8f95389e-c579-4294-b7a5-f22c93e45d85","Type":"ContainerStarted","Data":"46544c4fb7ddd7320994262282552c1fa32de86d45753b0401e24786fdc5b188"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.552168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2zp9h" event={"ID":"2a66dc81-048f-4d25-b04f-16e8063d92b7","Type":"ContainerStarted","Data":"589c9999c73663a2a056f75818b84ace2af485ba6208cdb6bba3d61f6b137093"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.552261 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2zp9h" event={"ID":"2a66dc81-048f-4d25-b04f-16e8063d92b7","Type":"ContainerStarted","Data":"b2eb1527b2e309fa50a8ef038aabd3cf7d6d10db343224a3fa73d219d256d8ef"} Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.592174 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" podStartSLOduration=2.59213813 podStartE2EDuration="2.59213813s" podCreationTimestamp="2025-12-09 15:35:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:24.556876321 +0000 UTC m=+1611.711620309" watchObservedRunningTime="2025-12-09 15:35:24.59213813 +0000 UTC m=+1611.746882118" Dec 09 15:35:24 crc kubenswrapper[4716]: I1209 15:35:24.637467 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-2zp9h" podStartSLOduration=3.637443313 podStartE2EDuration="3.637443313s" podCreationTimestamp="2025-12-09 15:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:24.589374922 +0000 UTC m=+1611.744118920" watchObservedRunningTime="2025-12-09 15:35:24.637443313 +0000 UTC m=+1611.792187301" Dec 09 15:35:25 crc kubenswrapper[4716]: I1209 15:35:25.610904 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" event={"ID":"e84c448d-0f33-44b9-a68d-5451111935f2","Type":"ContainerStarted","Data":"94e4d56923bc1f77d9ab2429df23c74d42758061a35a2ababf498a3bb464ef50"} Dec 09 15:35:25 crc kubenswrapper[4716]: I1209 15:35:25.611267 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:25 crc kubenswrapper[4716]: I1209 15:35:25.644282 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" podStartSLOduration=4.644258446 podStartE2EDuration="4.644258446s" podCreationTimestamp="2025-12-09 15:35:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:25.634145109 +0000 UTC m=+1612.788889107" watchObservedRunningTime="2025-12-09 15:35:25.644258446 +0000 UTC m=+1612.799002434" Dec 09 15:35:25 crc kubenswrapper[4716]: I1209 15:35:25.742326 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:25 crc kubenswrapper[4716]: I1209 15:35:25.763250 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.742209 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"65454596-9d51-4fca-b5eb-c326dc4b3bb6","Type":"ContainerStarted","Data":"58fd8d5da688890a47d1cb2a502aa495bfadb57ec486a533e334d3c00bf5375e"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.742809 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65454596-9d51-4fca-b5eb-c326dc4b3bb6","Type":"ContainerStarted","Data":"45f5fa63a69b2f7b375897fa5387a22fd7853e208cf5ff1fdcd0f8d360237ffb"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.745483 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c0b4428b-bbd0-4a02-8823-47813ec73b24","Type":"ContainerStarted","Data":"c9cdf8bdd05218dcae4dd7bd9ee702ea5aeb7ee7321ef5baa615f8de34603028"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.745643 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="c0b4428b-bbd0-4a02-8823-47813ec73b24" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://c9cdf8bdd05218dcae4dd7bd9ee702ea5aeb7ee7321ef5baa615f8de34603028" gracePeriod=30 Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.753293 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lzlrv" event={"ID":"560906c8-b759-4faa-9a29-86f3c8f92cdb","Type":"ContainerStarted","Data":"d330b664f373e2ad2b15415e7edc2cba5711eed0e0a1b40b85e0d50c5e2fb448"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.755534 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8","Type":"ContainerStarted","Data":"d34d8d270fe368cd35294ba5940f5756b2d244ecf5ed7e03f78ad23faa5d5ef5"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.755584 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8","Type":"ContainerStarted","Data":"bc73e135a464035ffbb1ad6c456f8d3a22f342fc42ab8926f999ecc120028554"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.755747 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-log" containerID="cri-o://bc73e135a464035ffbb1ad6c456f8d3a22f342fc42ab8926f999ecc120028554" gracePeriod=30 Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.756224 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-metadata" containerID="cri-o://d34d8d270fe368cd35294ba5940f5756b2d244ecf5ed7e03f78ad23faa5d5ef5" gracePeriod=30 Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.758140 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a7089c58-3ddf-493a-a521-6cbb1c8f4cba","Type":"ContainerStarted","Data":"faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254"} Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.776436 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.777047 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.779441 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" 
podStartSLOduration=3.853021341 podStartE2EDuration="10.779405289s" podCreationTimestamp="2025-12-09 15:35:21 +0000 UTC" firstStartedPulling="2025-12-09 15:35:23.7262209 +0000 UTC m=+1610.880964888" lastFinishedPulling="2025-12-09 15:35:30.652604848 +0000 UTC m=+1617.807348836" observedRunningTime="2025-12-09 15:35:31.774410388 +0000 UTC m=+1618.929154376" watchObservedRunningTime="2025-12-09 15:35:31.779405289 +0000 UTC m=+1618.934149287" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.800992 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-lzlrv" podStartSLOduration=2.323703614 podStartE2EDuration="8.80096649s" podCreationTimestamp="2025-12-09 15:35:23 +0000 UTC" firstStartedPulling="2025-12-09 15:35:24.24337875 +0000 UTC m=+1611.398122738" lastFinishedPulling="2025-12-09 15:35:30.720641626 +0000 UTC m=+1617.875385614" observedRunningTime="2025-12-09 15:35:31.78931653 +0000 UTC m=+1618.944060518" watchObservedRunningTime="2025-12-09 15:35:31.80096649 +0000 UTC m=+1618.955710478" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.827421 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=4.029771158 podStartE2EDuration="10.827396389s" podCreationTimestamp="2025-12-09 15:35:21 +0000 UTC" firstStartedPulling="2025-12-09 15:35:23.855030169 +0000 UTC m=+1611.009774157" lastFinishedPulling="2025-12-09 15:35:30.6526554 +0000 UTC m=+1617.807399388" observedRunningTime="2025-12-09 15:35:31.808162804 +0000 UTC m=+1618.962906792" watchObservedRunningTime="2025-12-09 15:35:31.827396389 +0000 UTC m=+1618.982140377" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.837251 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.853895076 podStartE2EDuration="10.837231017s" podCreationTimestamp="2025-12-09 15:35:21 +0000 UTC" firstStartedPulling="2025-12-09 15:35:23.660658213 +0000 UTC m=+1610.815402201" lastFinishedPulling="2025-12-09 15:35:30.643994154 +0000 UTC m=+1617.798738142" observedRunningTime="2025-12-09 15:35:31.832773281 +0000 UTC m=+1618.987517269" watchObservedRunningTime="2025-12-09 15:35:31.837231017 +0000 UTC m=+1618.991975005" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.859023 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=4.010635816 podStartE2EDuration="10.859002494s" podCreationTimestamp="2025-12-09 15:35:21 +0000 UTC" firstStartedPulling="2025-12-09 15:35:23.766007547 +0000 UTC m=+1610.920751535" lastFinishedPulling="2025-12-09 15:35:30.614374225 +0000 UTC m=+1617.769118213" observedRunningTime="2025-12-09 15:35:31.856003529 +0000 UTC m=+1619.010747507" watchObservedRunningTime="2025-12-09 15:35:31.859002494 +0000 UTC m=+1619.013746482" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.998952 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 15:35:31 crc kubenswrapper[4716]: I1209 15:35:31.999347 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.044754 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.044793 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-scheduler-0" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.072692 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.080446 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.164403 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-h2zld"] Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.169421 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="dnsmasq-dns" containerID="cri-o://15030166a469f0c01e34beed8ba32884e53efc7db49d2f1509100c1c5b992fa3" gracePeriod=10 Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.386125 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.209:5353: connect: connection refused" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.780598 4716 generic.go:334] "Generic (PLEG): container finished" podID="8f95389e-c579-4294-b7a5-f22c93e45d85" containerID="297b248ce8a37da2b619eddbacd46e33937aff3069e0b86332f34f04a75c9a3f" exitCode=0 Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.780759 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" event={"ID":"8f95389e-c579-4294-b7a5-f22c93e45d85","Type":"ContainerDied","Data":"297b248ce8a37da2b619eddbacd46e33937aff3069e0b86332f34f04a75c9a3f"} Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.798919 4716 generic.go:334] "Generic (PLEG): container finished" podID="1893ef85-1e86-4247-8950-95c26985ccbc" containerID="15030166a469f0c01e34beed8ba32884e53efc7db49d2f1509100c1c5b992fa3" exitCode=0 Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.799044 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" event={"ID":"1893ef85-1e86-4247-8950-95c26985ccbc","Type":"ContainerDied","Data":"15030166a469f0c01e34beed8ba32884e53efc7db49d2f1509100c1c5b992fa3"} Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.805997 4716 generic.go:334] "Generic (PLEG): container finished" podID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerID="d34d8d270fe368cd35294ba5940f5756b2d244ecf5ed7e03f78ad23faa5d5ef5" exitCode=0 Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.806043 4716 generic.go:334] "Generic (PLEG): container finished" podID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerID="bc73e135a464035ffbb1ad6c456f8d3a22f342fc42ab8926f999ecc120028554" exitCode=143 Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.806081 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8","Type":"ContainerDied","Data":"d34d8d270fe368cd35294ba5940f5756b2d244ecf5ed7e03f78ad23faa5d5ef5"} Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.806135 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8","Type":"ContainerDied","Data":"bc73e135a464035ffbb1ad6c456f8d3a22f342fc42ab8926f999ecc120028554"} Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.857899 4716 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.858106 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.235:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:35:32 crc kubenswrapper[4716]: I1209 15:35:32.858404 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.235:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.156107 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.307323 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfbt2\" (UniqueName: \"kubernetes.io/projected/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-kube-api-access-mfbt2\") pod \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.307431 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-config-data\") pod \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.307525 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-combined-ca-bundle\") pod \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.307780 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-logs\") pod \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\" (UID: \"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.316164 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-logs" (OuterVolumeSpecName: "logs") pod "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" (UID: "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.333613 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-kube-api-access-mfbt2" (OuterVolumeSpecName: "kube-api-access-mfbt2") pod "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" (UID: "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8"). InnerVolumeSpecName "kube-api-access-mfbt2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.351091 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.352013 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfbt2\" (UniqueName: \"kubernetes.io/projected/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-kube-api-access-mfbt2\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.476819 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-config-data" (OuterVolumeSpecName: "config-data") pod "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" (UID: "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.496259 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.600945 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.653917 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" (UID: "7e3a56d5-7ebc-4bab-84fd-ac67e93145d8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.702354 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-svc\") pod \"1893ef85-1e86-4247-8950-95c26985ccbc\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.702481 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-config\") pod \"1893ef85-1e86-4247-8950-95c26985ccbc\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.702554 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-sb\") pod \"1893ef85-1e86-4247-8950-95c26985ccbc\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.702644 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-nb\") pod \"1893ef85-1e86-4247-8950-95c26985ccbc\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.702720 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-swift-storage-0\") pod \"1893ef85-1e86-4247-8950-95c26985ccbc\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.702782 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmfzb\" (UniqueName: \"kubernetes.io/projected/1893ef85-1e86-4247-8950-95c26985ccbc-kube-api-access-qmfzb\") pod \"1893ef85-1e86-4247-8950-95c26985ccbc\" (UID: \"1893ef85-1e86-4247-8950-95c26985ccbc\") " Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.703588 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.710426 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1893ef85-1e86-4247-8950-95c26985ccbc-kube-api-access-qmfzb" (OuterVolumeSpecName: "kube-api-access-qmfzb") pod "1893ef85-1e86-4247-8950-95c26985ccbc" (UID: "1893ef85-1e86-4247-8950-95c26985ccbc"). InnerVolumeSpecName "kube-api-access-qmfzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.806244 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1893ef85-1e86-4247-8950-95c26985ccbc" (UID: "1893ef85-1e86-4247-8950-95c26985ccbc"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.806859 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmfzb\" (UniqueName: \"kubernetes.io/projected/1893ef85-1e86-4247-8950-95c26985ccbc-kube-api-access-qmfzb\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.806899 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.831357 4716 generic.go:334] "Generic (PLEG): container finished" podID="2a66dc81-048f-4d25-b04f-16e8063d92b7" containerID="589c9999c73663a2a056f75818b84ace2af485ba6208cdb6bba3d61f6b137093" exitCode=0 Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.831469 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2zp9h" event={"ID":"2a66dc81-048f-4d25-b04f-16e8063d92b7","Type":"ContainerDied","Data":"589c9999c73663a2a056f75818b84ace2af485ba6208cdb6bba3d61f6b137093"} Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.835559 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1893ef85-1e86-4247-8950-95c26985ccbc" (UID: "1893ef85-1e86-4247-8950-95c26985ccbc"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.841570 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" event={"ID":"1893ef85-1e86-4247-8950-95c26985ccbc","Type":"ContainerDied","Data":"c77d9042923476d03a71e7c17b81298f7c00d69b329d62309b7cb92b8516ba8a"} Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.841611 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7756b9d78c-h2zld" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.841649 4716 scope.go:117] "RemoveContainer" containerID="15030166a469f0c01e34beed8ba32884e53efc7db49d2f1509100c1c5b992fa3" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.856328 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.864990 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e3a56d5-7ebc-4bab-84fd-ac67e93145d8","Type":"ContainerDied","Data":"a513fcc71b74e0e8012272146586bf38b19811da00b1eebfd1821e9d02180e18"} Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.880999 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1893ef85-1e86-4247-8950-95c26985ccbc" (UID: "1893ef85-1e86-4247-8950-95c26985ccbc"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.887757 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1893ef85-1e86-4247-8950-95c26985ccbc" (UID: "1893ef85-1e86-4247-8950-95c26985ccbc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.909792 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.909850 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.909879 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.932128 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-config" (OuterVolumeSpecName: "config") pod "1893ef85-1e86-4247-8950-95c26985ccbc" (UID: "1893ef85-1e86-4247-8950-95c26985ccbc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.959471 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.980149 4716 scope.go:117] "RemoveContainer" containerID="eb03d62b4505bdd1ebd857e4a46c24056f996ead1be338eaf9cad1125c38117e" Dec 09 15:35:33 crc kubenswrapper[4716]: I1209 15:35:33.989125 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.008860 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:34 crc kubenswrapper[4716]: E1209 15:35:34.009952 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-log" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.009993 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-log" Dec 09 15:35:34 crc kubenswrapper[4716]: E1209 15:35:34.010056 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="init" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.010076 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="init" Dec 09 15:35:34 crc kubenswrapper[4716]: E1209 15:35:34.010103 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-metadata" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.010126 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-metadata" Dec 09 
15:35:34 crc kubenswrapper[4716]: E1209 15:35:34.010168 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="dnsmasq-dns" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.010186 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="dnsmasq-dns" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.010728 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-metadata" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.010775 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" containerName="dnsmasq-dns" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.010816 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" containerName="nova-metadata-log" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.015554 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.016281 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1893ef85-1e86-4247-8950-95c26985ccbc-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.018858 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.019318 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.041557 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.120271 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93c10b65-9575-43af-bdc9-7bba6d5b671e-logs\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.120340 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.120401 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-config-data\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.120523 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.120677 4716 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqmgp\" (UniqueName: \"kubernetes.io/projected/93c10b65-9575-43af-bdc9-7bba6d5b671e-kube-api-access-mqmgp\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.140297 4716 scope.go:117] "RemoveContainer" containerID="d34d8d270fe368cd35294ba5940f5756b2d244ecf5ed7e03f78ad23faa5d5ef5" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.187827 4716 scope.go:117] "RemoveContainer" containerID="bc73e135a464035ffbb1ad6c456f8d3a22f342fc42ab8926f999ecc120028554" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.224590 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.224761 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqmgp\" (UniqueName: \"kubernetes.io/projected/93c10b65-9575-43af-bdc9-7bba6d5b671e-kube-api-access-mqmgp\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.224997 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93c10b65-9575-43af-bdc9-7bba6d5b671e-logs\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.225055 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.225134 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-config-data\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.227657 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93c10b65-9575-43af-bdc9-7bba6d5b671e-logs\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.237373 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.242479 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-h2zld"] Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.244396 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.254748 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-config-data\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.257454 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqmgp\" (UniqueName: \"kubernetes.io/projected/93c10b65-9575-43af-bdc9-7bba6d5b671e-kube-api-access-mqmgp\") pod \"nova-metadata-0\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.300494 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7756b9d78c-h2zld"] Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.493305 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.603675 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.648420 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq82m\" (UniqueName: \"kubernetes.io/projected/8f95389e-c579-4294-b7a5-f22c93e45d85-kube-api-access-gq82m\") pod \"8f95389e-c579-4294-b7a5-f22c93e45d85\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.650059 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-scripts\") pod \"8f95389e-c579-4294-b7a5-f22c93e45d85\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.650246 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-config-data\") pod \"8f95389e-c579-4294-b7a5-f22c93e45d85\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.650280 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-combined-ca-bundle\") pod \"8f95389e-c579-4294-b7a5-f22c93e45d85\" (UID: \"8f95389e-c579-4294-b7a5-f22c93e45d85\") " Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.659538 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-scripts" (OuterVolumeSpecName: "scripts") pod "8f95389e-c579-4294-b7a5-f22c93e45d85" (UID: "8f95389e-c579-4294-b7a5-f22c93e45d85"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.660252 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f95389e-c579-4294-b7a5-f22c93e45d85-kube-api-access-gq82m" (OuterVolumeSpecName: "kube-api-access-gq82m") pod "8f95389e-c579-4294-b7a5-f22c93e45d85" (UID: "8f95389e-c579-4294-b7a5-f22c93e45d85"). InnerVolumeSpecName "kube-api-access-gq82m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.663343 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq82m\" (UniqueName: \"kubernetes.io/projected/8f95389e-c579-4294-b7a5-f22c93e45d85-kube-api-access-gq82m\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.663391 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.711836 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f95389e-c579-4294-b7a5-f22c93e45d85" (UID: "8f95389e-c579-4294-b7a5-f22c93e45d85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.719420 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-config-data" (OuterVolumeSpecName: "config-data") pod "8f95389e-c579-4294-b7a5-f22c93e45d85" (UID: "8f95389e-c579-4294-b7a5-f22c93e45d85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.768591 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.769018 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f95389e-c579-4294-b7a5-f22c93e45d85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.883867 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" event={"ID":"8f95389e-c579-4294-b7a5-f22c93e45d85","Type":"ContainerDied","Data":"46544c4fb7ddd7320994262282552c1fa32de86d45753b0401e24786fdc5b188"} Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.883917 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46544c4fb7ddd7320994262282552c1fa32de86d45753b0401e24786fdc5b188" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.884016 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-9gtbn" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.922845 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 09 15:35:34 crc kubenswrapper[4716]: E1209 15:35:34.925298 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f95389e-c579-4294-b7a5-f22c93e45d85" containerName="nova-cell1-conductor-db-sync" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.925339 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f95389e-c579-4294-b7a5-f22c93e45d85" containerName="nova-cell1-conductor-db-sync" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.925971 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f95389e-c579-4294-b7a5-f22c93e45d85" containerName="nova-cell1-conductor-db-sync" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.933794 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.945094 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.945243 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.977270 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp2nn\" (UniqueName: \"kubernetes.io/projected/15289e69-4a3c-4832-bfc8-a2af3de4ca72-kube-api-access-pp2nn\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.977382 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15289e69-4a3c-4832-bfc8-a2af3de4ca72-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:34 crc kubenswrapper[4716]: I1209 15:35:34.981368 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15289e69-4a3c-4832-bfc8-a2af3de4ca72-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.080603 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.084213 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp2nn\" (UniqueName: \"kubernetes.io/projected/15289e69-4a3c-4832-bfc8-a2af3de4ca72-kube-api-access-pp2nn\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.084310 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15289e69-4a3c-4832-bfc8-a2af3de4ca72-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 
15:35:35.084406 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15289e69-4a3c-4832-bfc8-a2af3de4ca72-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.093378 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15289e69-4a3c-4832-bfc8-a2af3de4ca72-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.098136 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15289e69-4a3c-4832-bfc8-a2af3de4ca72-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.108895 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp2nn\" (UniqueName: \"kubernetes.io/projected/15289e69-4a3c-4832-bfc8-a2af3de4ca72-kube-api-access-pp2nn\") pod \"nova-cell1-conductor-0\" (UID: \"15289e69-4a3c-4832-bfc8-a2af3de4ca72\") " pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.259152 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1893ef85-1e86-4247-8950-95c26985ccbc" path="/var/lib/kubelet/pods/1893ef85-1e86-4247-8950-95c26985ccbc/volumes" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.260254 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e3a56d5-7ebc-4bab-84fd-ac67e93145d8" path="/var/lib/kubelet/pods/7e3a56d5-7ebc-4bab-84fd-ac67e93145d8/volumes" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.344803 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.507921 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.597023 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znjlh\" (UniqueName: \"kubernetes.io/projected/2a66dc81-048f-4d25-b04f-16e8063d92b7-kube-api-access-znjlh\") pod \"2a66dc81-048f-4d25-b04f-16e8063d92b7\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.597473 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-scripts\") pod \"2a66dc81-048f-4d25-b04f-16e8063d92b7\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.597550 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-config-data\") pod \"2a66dc81-048f-4d25-b04f-16e8063d92b7\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.597663 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-combined-ca-bundle\") pod \"2a66dc81-048f-4d25-b04f-16e8063d92b7\" (UID: \"2a66dc81-048f-4d25-b04f-16e8063d92b7\") " Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.602999 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a66dc81-048f-4d25-b04f-16e8063d92b7-kube-api-access-znjlh" (OuterVolumeSpecName: "kube-api-access-znjlh") pod "2a66dc81-048f-4d25-b04f-16e8063d92b7" (UID: "2a66dc81-048f-4d25-b04f-16e8063d92b7"). InnerVolumeSpecName "kube-api-access-znjlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.606879 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-scripts" (OuterVolumeSpecName: "scripts") pod "2a66dc81-048f-4d25-b04f-16e8063d92b7" (UID: "2a66dc81-048f-4d25-b04f-16e8063d92b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.634081 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-config-data" (OuterVolumeSpecName: "config-data") pod "2a66dc81-048f-4d25-b04f-16e8063d92b7" (UID: "2a66dc81-048f-4d25-b04f-16e8063d92b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.635439 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a66dc81-048f-4d25-b04f-16e8063d92b7" (UID: "2a66dc81-048f-4d25-b04f-16e8063d92b7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.702174 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znjlh\" (UniqueName: \"kubernetes.io/projected/2a66dc81-048f-4d25-b04f-16e8063d92b7-kube-api-access-znjlh\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.702216 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.702311 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.702330 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a66dc81-048f-4d25-b04f-16e8063d92b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:35 crc kubenswrapper[4716]: W1209 15:35:35.852779 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15289e69_4a3c_4832_bfc8_a2af3de4ca72.slice/crio-faaf840c271fa2cfc001ce27d1946a62c082a6d77732e85e71257570f5e1a0aa WatchSource:0}: Error finding container faaf840c271fa2cfc001ce27d1946a62c082a6d77732e85e71257570f5e1a0aa: Status 404 returned error can't find the container with id faaf840c271fa2cfc001ce27d1946a62c082a6d77732e85e71257570f5e1a0aa Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.860598 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.929957 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"15289e69-4a3c-4832-bfc8-a2af3de4ca72","Type":"ContainerStarted","Data":"faaf840c271fa2cfc001ce27d1946a62c082a6d77732e85e71257570f5e1a0aa"} Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.932276 4716 generic.go:334] "Generic (PLEG): container finished" podID="560906c8-b759-4faa-9a29-86f3c8f92cdb" containerID="d330b664f373e2ad2b15415e7edc2cba5711eed0e0a1b40b85e0d50c5e2fb448" exitCode=0 Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.932373 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lzlrv" event={"ID":"560906c8-b759-4faa-9a29-86f3c8f92cdb","Type":"ContainerDied","Data":"d330b664f373e2ad2b15415e7edc2cba5711eed0e0a1b40b85e0d50c5e2fb448"} Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.944658 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2zp9h" event={"ID":"2a66dc81-048f-4d25-b04f-16e8063d92b7","Type":"ContainerDied","Data":"b2eb1527b2e309fa50a8ef038aabd3cf7d6d10db343224a3fa73d219d256d8ef"} Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.944720 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2eb1527b2e309fa50a8ef038aabd3cf7d6d10db343224a3fa73d219d256d8ef" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.944840 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2zp9h" Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.954847 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93c10b65-9575-43af-bdc9-7bba6d5b671e","Type":"ContainerStarted","Data":"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e"} Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.954897 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93c10b65-9575-43af-bdc9-7bba6d5b671e","Type":"ContainerStarted","Data":"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206"} Dec 09 15:35:35 crc kubenswrapper[4716]: I1209 15:35:35.954923 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93c10b65-9575-43af-bdc9-7bba6d5b671e","Type":"ContainerStarted","Data":"9a4737752c0e771388de8740aaf77c9e45874d4d4ad2061a8ed9c755acf71f87"} Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.139848 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.139817205 podStartE2EDuration="3.139817205s" podCreationTimestamp="2025-12-09 15:35:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:35.998380168 +0000 UTC m=+1623.153124256" watchObservedRunningTime="2025-12-09 15:35:36.139817205 +0000 UTC m=+1623.294561193" Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.155529 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.155779 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-log" containerID="cri-o://45f5fa63a69b2f7b375897fa5387a22fd7853e208cf5ff1fdcd0f8d360237ffb" gracePeriod=30 Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.155853 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-api" containerID="cri-o://58fd8d5da688890a47d1cb2a502aa495bfadb57ec486a533e334d3c00bf5375e" gracePeriod=30 Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.211414 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.214254 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" containerName="nova-scheduler-scheduler" containerID="cri-o://faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254" gracePeriod=30 Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.259027 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.720584 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.969410 4716 generic.go:334] "Generic (PLEG): container finished" podID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerID="45f5fa63a69b2f7b375897fa5387a22fd7853e208cf5ff1fdcd0f8d360237ffb" exitCode=143 Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.969516 4716 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65454596-9d51-4fca-b5eb-c326dc4b3bb6","Type":"ContainerDied","Data":"45f5fa63a69b2f7b375897fa5387a22fd7853e208cf5ff1fdcd0f8d360237ffb"} Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.972266 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"15289e69-4a3c-4832-bfc8-a2af3de4ca72","Type":"ContainerStarted","Data":"be71c3644e138ad8167a4d30af0f95c0cc07742b46a0b757c0701087c7ea7a6a"} Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.974260 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.975968 4716 generic.go:334] "Generic (PLEG): container finished" podID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" containerID="faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254" exitCode=0 Dec 09 15:35:36 crc kubenswrapper[4716]: I1209 15:35:36.976140 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a7089c58-3ddf-493a-a521-6cbb1c8f4cba","Type":"ContainerDied","Data":"faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254"} Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.009871 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.009826921 podStartE2EDuration="3.009826921s" podCreationTimestamp="2025-12-09 15:35:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:37.001391533 +0000 UTC m=+1624.156135531" watchObservedRunningTime="2025-12-09 15:35:37.009826921 +0000 UTC m=+1624.164570909" Dec 09 15:35:37 crc kubenswrapper[4716]: E1209 15:35:37.046881 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254 is running failed: container process not found" containerID="faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 15:35:37 crc kubenswrapper[4716]: E1209 15:35:37.048279 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254 is running failed: container process not found" containerID="faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 15:35:37 crc kubenswrapper[4716]: E1209 15:35:37.050366 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254 is running failed: container process not found" containerID="faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 15:35:37 crc kubenswrapper[4716]: E1209 15:35:37.050436 4716 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" 
podUID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" containerName="nova-scheduler-scheduler" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.593791 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.605745 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683601 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle\") pod \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683671 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6zxw\" (UniqueName: \"kubernetes.io/projected/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-kube-api-access-k6zxw\") pod \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683699 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-scripts\") pod \"560906c8-b759-4faa-9a29-86f3c8f92cdb\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683786 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-combined-ca-bundle\") pod \"560906c8-b759-4faa-9a29-86f3c8f92cdb\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683821 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rdtq\" (UniqueName: \"kubernetes.io/projected/560906c8-b759-4faa-9a29-86f3c8f92cdb-kube-api-access-9rdtq\") pod \"560906c8-b759-4faa-9a29-86f3c8f92cdb\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683898 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-config-data\") pod \"560906c8-b759-4faa-9a29-86f3c8f92cdb\" (UID: \"560906c8-b759-4faa-9a29-86f3c8f92cdb\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.683917 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-config-data\") pod \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.689611 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/560906c8-b759-4faa-9a29-86f3c8f92cdb-kube-api-access-9rdtq" (OuterVolumeSpecName: "kube-api-access-9rdtq") pod "560906c8-b759-4faa-9a29-86f3c8f92cdb" (UID: "560906c8-b759-4faa-9a29-86f3c8f92cdb"). InnerVolumeSpecName "kube-api-access-9rdtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.691005 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-kube-api-access-k6zxw" (OuterVolumeSpecName: "kube-api-access-k6zxw") pod "a7089c58-3ddf-493a-a521-6cbb1c8f4cba" (UID: "a7089c58-3ddf-493a-a521-6cbb1c8f4cba"). InnerVolumeSpecName "kube-api-access-k6zxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.696038 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-scripts" (OuterVolumeSpecName: "scripts") pod "560906c8-b759-4faa-9a29-86f3c8f92cdb" (UID: "560906c8-b759-4faa-9a29-86f3c8f92cdb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:37 crc kubenswrapper[4716]: E1209 15:35:37.716114 4716 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle podName:a7089c58-3ddf-493a-a521-6cbb1c8f4cba nodeName:}" failed. No retries permitted until 2025-12-09 15:35:38.216081329 +0000 UTC m=+1625.370825327 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle") pod "a7089c58-3ddf-493a-a521-6cbb1c8f4cba" (UID: "a7089c58-3ddf-493a-a521-6cbb1c8f4cba") : error deleting /var/lib/kubelet/pods/a7089c58-3ddf-493a-a521-6cbb1c8f4cba/volume-subpaths: remove /var/lib/kubelet/pods/a7089c58-3ddf-493a-a521-6cbb1c8f4cba/volume-subpaths: no such file or directory Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.719129 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-config-data" (OuterVolumeSpecName: "config-data") pod "560906c8-b759-4faa-9a29-86f3c8f92cdb" (UID: "560906c8-b759-4faa-9a29-86f3c8f92cdb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.719216 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-config-data" (OuterVolumeSpecName: "config-data") pod "a7089c58-3ddf-493a-a521-6cbb1c8f4cba" (UID: "a7089c58-3ddf-493a-a521-6cbb1c8f4cba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.742218 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "560906c8-b759-4faa-9a29-86f3c8f92cdb" (UID: "560906c8-b759-4faa-9a29-86f3c8f92cdb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.786392 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.786429 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.786439 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6zxw\" (UniqueName: \"kubernetes.io/projected/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-kube-api-access-k6zxw\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.786452 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.786460 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/560906c8-b759-4faa-9a29-86f3c8f92cdb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.786469 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rdtq\" (UniqueName: \"kubernetes.io/projected/560906c8-b759-4faa-9a29-86f3c8f92cdb-kube-api-access-9rdtq\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.990983 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a7089c58-3ddf-493a-a521-6cbb1c8f4cba","Type":"ContainerDied","Data":"3705b8b75e5611b4c5c59bb7ff1cb755e95415dcce88f528068363c26e449eaa"} Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.991043 4716 scope.go:117] "RemoveContainer" containerID="faf080d62f15b006030c17b96f40208ebcfa6e515409a6944095ea1381560254" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.991212 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.994051 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-lzlrv" event={"ID":"560906c8-b759-4faa-9a29-86f3c8f92cdb","Type":"ContainerDied","Data":"83ca10fb0fe9517cc5238120ef112d452e0ad10d1f5edd25dedc7fa057c39159"} Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.994114 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83ca10fb0fe9517cc5238120ef112d452e0ad10d1f5edd25dedc7fa057c39159" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.994256 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-lzlrv" Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.994967 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-log" containerID="cri-o://a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206" gracePeriod=30 Dec 09 15:35:37 crc kubenswrapper[4716]: I1209 15:35:37.995110 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-metadata" containerID="cri-o://a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e" gracePeriod=30 Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.297510 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle\") pod \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\" (UID: \"a7089c58-3ddf-493a-a521-6cbb1c8f4cba\") " Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.304807 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7089c58-3ddf-493a-a521-6cbb1c8f4cba" (UID: "a7089c58-3ddf-493a-a521-6cbb1c8f4cba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.401113 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7089c58-3ddf-493a-a521-6cbb1c8f4cba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.521210 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.671777 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.689829 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.704115 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.709068 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93c10b65-9575-43af-bdc9-7bba6d5b671e-logs\") pod \"93c10b65-9575-43af-bdc9-7bba6d5b671e\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.709109 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-nova-metadata-tls-certs\") pod \"93c10b65-9575-43af-bdc9-7bba6d5b671e\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.709323 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqmgp\" (UniqueName: \"kubernetes.io/projected/93c10b65-9575-43af-bdc9-7bba6d5b671e-kube-api-access-mqmgp\") pod \"93c10b65-9575-43af-bdc9-7bba6d5b671e\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.709355 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-config-data\") pod \"93c10b65-9575-43af-bdc9-7bba6d5b671e\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.709397 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-combined-ca-bundle\") pod \"93c10b65-9575-43af-bdc9-7bba6d5b671e\" (UID: \"93c10b65-9575-43af-bdc9-7bba6d5b671e\") " Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.709453 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93c10b65-9575-43af-bdc9-7bba6d5b671e-logs" (OuterVolumeSpecName: "logs") pod "93c10b65-9575-43af-bdc9-7bba6d5b671e" (UID: "93c10b65-9575-43af-bdc9-7bba6d5b671e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.710043 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93c10b65-9575-43af-bdc9-7bba6d5b671e-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.716955 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93c10b65-9575-43af-bdc9-7bba6d5b671e-kube-api-access-mqmgp" (OuterVolumeSpecName: "kube-api-access-mqmgp") pod "93c10b65-9575-43af-bdc9-7bba6d5b671e" (UID: "93c10b65-9575-43af-bdc9-7bba6d5b671e"). InnerVolumeSpecName "kube-api-access-mqmgp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.757915 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.758336 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-config-data" (OuterVolumeSpecName: "config-data") pod "93c10b65-9575-43af-bdc9-7bba6d5b671e" (UID: "93c10b65-9575-43af-bdc9-7bba6d5b671e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:38 crc kubenswrapper[4716]: E1209 15:35:38.759171 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-log" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.759196 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-log" Dec 09 15:35:38 crc kubenswrapper[4716]: E1209 15:35:38.759245 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-metadata" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.759253 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-metadata" Dec 09 15:35:38 crc kubenswrapper[4716]: E1209 15:35:38.759284 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a66dc81-048f-4d25-b04f-16e8063d92b7" containerName="nova-manage" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.759293 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a66dc81-048f-4d25-b04f-16e8063d92b7" containerName="nova-manage" Dec 09 15:35:38 crc kubenswrapper[4716]: E1209 15:35:38.759331 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" containerName="nova-scheduler-scheduler" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.759339 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" containerName="nova-scheduler-scheduler" Dec 09 15:35:38 crc kubenswrapper[4716]: E1209 15:35:38.759361 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560906c8-b759-4faa-9a29-86f3c8f92cdb" containerName="aodh-db-sync" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.759368 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="560906c8-b759-4faa-9a29-86f3c8f92cdb" containerName="aodh-db-sync" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.761075 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-metadata" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.761115 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="560906c8-b759-4faa-9a29-86f3c8f92cdb" containerName="aodh-db-sync" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.761134 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerName="nova-metadata-log" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.761159 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" containerName="nova-scheduler-scheduler" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.761181 4716 
memory_manager.go:354] "RemoveStaleState removing state" podUID="2a66dc81-048f-4d25-b04f-16e8063d92b7" containerName="nova-manage" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.765389 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.771054 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.814502 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.815278 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-config-data\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.815489 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfrlp\" (UniqueName: \"kubernetes.io/projected/dd08e4af-c44e-421a-9547-39cd45eb46d2-kube-api-access-sfrlp\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.816175 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.816411 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqmgp\" (UniqueName: \"kubernetes.io/projected/93c10b65-9575-43af-bdc9-7bba6d5b671e-kube-api-access-mqmgp\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.816424 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.827892 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93c10b65-9575-43af-bdc9-7bba6d5b671e" (UID: "93c10b65-9575-43af-bdc9-7bba6d5b671e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.843561 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "93c10b65-9575-43af-bdc9-7bba6d5b671e" (UID: "93c10b65-9575-43af-bdc9-7bba6d5b671e"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.920574 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-config-data\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.921136 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfrlp\" (UniqueName: \"kubernetes.io/projected/dd08e4af-c44e-421a-9547-39cd45eb46d2-kube-api-access-sfrlp\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.921267 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.921451 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.921476 4716 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/93c10b65-9575-43af-bdc9-7bba6d5b671e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.927360 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.928284 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-config-data\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:38 crc kubenswrapper[4716]: I1209 15:35:38.939947 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfrlp\" (UniqueName: \"kubernetes.io/projected/dd08e4af-c44e-421a-9547-39cd45eb46d2-kube-api-access-sfrlp\") pod \"nova-scheduler-0\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " pod="openstack/nova-scheduler-0" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009316 4716 generic.go:334] "Generic (PLEG): container finished" podID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerID="a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e" exitCode=0 Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009353 4716 generic.go:334] "Generic (PLEG): container finished" podID="93c10b65-9575-43af-bdc9-7bba6d5b671e" containerID="a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206" exitCode=143 Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009433 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009445 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93c10b65-9575-43af-bdc9-7bba6d5b671e","Type":"ContainerDied","Data":"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e"} Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009479 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93c10b65-9575-43af-bdc9-7bba6d5b671e","Type":"ContainerDied","Data":"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206"} Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009492 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"93c10b65-9575-43af-bdc9-7bba6d5b671e","Type":"ContainerDied","Data":"9a4737752c0e771388de8740aaf77c9e45874d4d4ad2061a8ed9c755acf71f87"} Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.009509 4716 scope.go:117] "RemoveContainer" containerID="a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.037531 4716 scope.go:117] "RemoveContainer" containerID="a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.053079 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.071503 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.075062 4716 scope.go:117] "RemoveContainer" containerID="a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e" Dec 09 15:35:39 crc kubenswrapper[4716]: E1209 15:35:39.075538 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e\": container with ID starting with a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e not found: ID does not exist" containerID="a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.075571 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e"} err="failed to get container status \"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e\": rpc error: code = NotFound desc = could not find container \"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e\": container with ID starting with a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e not found: ID does not exist" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.075599 4716 scope.go:117] "RemoveContainer" containerID="a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206" Dec 09 15:35:39 crc kubenswrapper[4716]: E1209 15:35:39.075838 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206\": container with ID starting with a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206 not found: ID does not exist" containerID="a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 
15:35:39.075868 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206"} err="failed to get container status \"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206\": rpc error: code = NotFound desc = could not find container \"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206\": container with ID starting with a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206 not found: ID does not exist" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.075887 4716 scope.go:117] "RemoveContainer" containerID="a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.076326 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e"} err="failed to get container status \"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e\": rpc error: code = NotFound desc = could not find container \"a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e\": container with ID starting with a6d2776c02ae2ef57827efaf9bc4cf3a3ed0a43216c2ec6b7b9aa019f836120e not found: ID does not exist" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.076355 4716 scope.go:117] "RemoveContainer" containerID="a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.076610 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206"} err="failed to get container status \"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206\": rpc error: code = NotFound desc = could not find container \"a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206\": container with ID starting with a3e74b595d8c7eb4ef6c19256657213066d6fd4ace59ee74b822d5aebc3d2206 not found: ID does not exist" Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.089378 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.091805 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.107855 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.111163 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.111398 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.128264 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97431237-6850-496d-a176-2e8359c8e0e5-logs\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.128406 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.128458 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pr2h\" (UniqueName: \"kubernetes.io/projected/97431237-6850-496d-a176-2e8359c8e0e5-kube-api-access-5pr2h\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.128484 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.128580 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-config-data\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.162035 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.231326 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pr2h\" (UniqueName: \"kubernetes.io/projected/97431237-6850-496d-a176-2e8359c8e0e5-kube-api-access-5pr2h\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.231392 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.231476 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-config-data\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.231700 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97431237-6850-496d-a176-2e8359c8e0e5-logs\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.231822 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.232323 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97431237-6850-496d-a176-2e8359c8e0e5-logs\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.235616 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93c10b65-9575-43af-bdc9-7bba6d5b671e" path="/var/lib/kubelet/pods/93c10b65-9575-43af-bdc9-7bba6d5b671e/volumes"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.236003 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.236322 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-config-data\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.237373 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7089c58-3ddf-493a-a521-6cbb1c8f4cba" path="/var/lib/kubelet/pods/a7089c58-3ddf-493a-a521-6cbb1c8f4cba/volumes"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.240393 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.249928 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pr2h\" (UniqueName: \"kubernetes.io/projected/97431237-6850-496d-a176-2e8359c8e0e5-kube-api-access-5pr2h\") pod \"nova-metadata-0\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.434488 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.638414 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 09 15:35:39 crc kubenswrapper[4716]: W1209 15:35:39.671201 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd08e4af_c44e_421a_9547_39cd45eb46d2.slice/crio-a8a23ec1af70db5df0ebd84d4f1da49d0c415918764476262f3ecf5337106717 WatchSource:0}: Error finding container a8a23ec1af70db5df0ebd84d4f1da49d0c415918764476262f3ecf5337106717: Status 404 returned error can't find the container with id a8a23ec1af70db5df0ebd84d4f1da49d0c415918764476262f3ecf5337106717
Dec 09 15:35:39 crc kubenswrapper[4716]: I1209 15:35:39.937385 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.026153 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97431237-6850-496d-a176-2e8359c8e0e5","Type":"ContainerStarted","Data":"a0eddfd77cd259f1be99e56b3887b442c37cbe7905e5a3b187c1dc365f29736b"}
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.034091 4716 generic.go:334] "Generic (PLEG): container finished" podID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerID="58fd8d5da688890a47d1cb2a502aa495bfadb57ec486a533e334d3c00bf5375e" exitCode=0
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.034220 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65454596-9d51-4fca-b5eb-c326dc4b3bb6","Type":"ContainerDied","Data":"58fd8d5da688890a47d1cb2a502aa495bfadb57ec486a533e334d3c00bf5375e"}
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.037203 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dd08e4af-c44e-421a-9547-39cd45eb46d2","Type":"ContainerStarted","Data":"89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd"}
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.037237 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dd08e4af-c44e-421a-9547-39cd45eb46d2","Type":"ContainerStarted","Data":"a8a23ec1af70db5df0ebd84d4f1da49d0c415918764476262f3ecf5337106717"}
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.058036 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.058015143 podStartE2EDuration="2.058015143s" podCreationTimestamp="2025-12-09 15:35:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:40.052751784 +0000 UTC m=+1627.207495772" watchObservedRunningTime="2025-12-09 15:35:40.058015143 +0000 UTC m=+1627.212759131"
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.073865 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.154880 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-combined-ca-bundle\") pod \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") "
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.154941 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skdc7\" (UniqueName: \"kubernetes.io/projected/65454596-9d51-4fca-b5eb-c326dc4b3bb6-kube-api-access-skdc7\") pod \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") "
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.155109 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65454596-9d51-4fca-b5eb-c326dc4b3bb6-logs\") pod \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") "
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.155957 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65454596-9d51-4fca-b5eb-c326dc4b3bb6-logs" (OuterVolumeSpecName: "logs") pod "65454596-9d51-4fca-b5eb-c326dc4b3bb6" (UID: "65454596-9d51-4fca-b5eb-c326dc4b3bb6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.157862 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-config-data\") pod \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\" (UID: \"65454596-9d51-4fca-b5eb-c326dc4b3bb6\") "
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.160680 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65454596-9d51-4fca-b5eb-c326dc4b3bb6-kube-api-access-skdc7" (OuterVolumeSpecName: "kube-api-access-skdc7") pod "65454596-9d51-4fca-b5eb-c326dc4b3bb6" (UID: "65454596-9d51-4fca-b5eb-c326dc4b3bb6"). InnerVolumeSpecName "kube-api-access-skdc7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.161305 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skdc7\" (UniqueName: \"kubernetes.io/projected/65454596-9d51-4fca-b5eb-c326dc4b3bb6-kube-api-access-skdc7\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.161335 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65454596-9d51-4fca-b5eb-c326dc4b3bb6-logs\") on node \"crc\" DevicePath \"\""
Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.189691 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65454596-9d51-4fca-b5eb-c326dc4b3bb6" (UID: "65454596-9d51-4fca-b5eb-c326dc4b3bb6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.193417 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-config-data" (OuterVolumeSpecName: "config-data") pod "65454596-9d51-4fca-b5eb-c326dc4b3bb6" (UID: "65454596-9d51-4fca-b5eb-c326dc4b3bb6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.263543 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:40 crc kubenswrapper[4716]: I1209 15:35:40.263577 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65454596-9d51-4fca-b5eb-c326dc4b3bb6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.050272 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97431237-6850-496d-a176-2e8359c8e0e5","Type":"ContainerStarted","Data":"f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de"} Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.050614 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97431237-6850-496d-a176-2e8359c8e0e5","Type":"ContainerStarted","Data":"759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d"} Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.052959 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"65454596-9d51-4fca-b5eb-c326dc4b3bb6","Type":"ContainerDied","Data":"91b8296ed79479be59467f010f1961c3fff361de6cfb44e28df677b5e115775e"} Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.052985 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.053013 4716 scope.go:117] "RemoveContainer" containerID="58fd8d5da688890a47d1cb2a502aa495bfadb57ec486a533e334d3c00bf5375e" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.091113 4716 scope.go:117] "RemoveContainer" containerID="45f5fa63a69b2f7b375897fa5387a22fd7853e208cf5ff1fdcd0f8d360237ffb" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.097726 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.097695727 podStartE2EDuration="2.097695727s" podCreationTimestamp="2025-12-09 15:35:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:41.076658201 +0000 UTC m=+1628.231402189" watchObservedRunningTime="2025-12-09 15:35:41.097695727 +0000 UTC m=+1628.252439715" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.136418 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.150378 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.166153 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:41 crc kubenswrapper[4716]: E1209 15:35:41.166988 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-api" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.167010 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-api" Dec 09 15:35:41 crc kubenswrapper[4716]: E1209 15:35:41.167069 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-log" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.167078 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-log" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.167428 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-api" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.167488 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" containerName="nova-api-log" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.170498 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.177913 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.187308 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.239937 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65454596-9d51-4fca-b5eb-c326dc4b3bb6" path="/var/lib/kubelet/pods/65454596-9d51-4fca-b5eb-c326dc4b3bb6/volumes" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.303245 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4df53e56-77ad-4210-96da-60de5f7da2ec-logs\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.303406 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-799z8\" (UniqueName: \"kubernetes.io/projected/4df53e56-77ad-4210-96da-60de5f7da2ec-kube-api-access-799z8\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.303427 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.303571 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-config-data\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.406340 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-config-data\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.406871 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4df53e56-77ad-4210-96da-60de5f7da2ec-logs\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.407031 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.407059 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-799z8\" (UniqueName: \"kubernetes.io/projected/4df53e56-77ad-4210-96da-60de5f7da2ec-kube-api-access-799z8\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 
15:35:41.408046 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4df53e56-77ad-4210-96da-60de5f7da2ec-logs\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.415369 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.415394 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-config-data\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.425186 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-799z8\" (UniqueName: \"kubernetes.io/projected/4df53e56-77ad-4210-96da-60de5f7da2ec-kube-api-access-799z8\") pod \"nova-api-0\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " pod="openstack/nova-api-0" Dec 09 15:35:41 crc kubenswrapper[4716]: I1209 15:35:41.503242 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.044247 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.070657 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4df53e56-77ad-4210-96da-60de5f7da2ec","Type":"ContainerStarted","Data":"c79aa102422f8ef8a39562628b3e5d14086cca56aa514b1fd639c9fa1007ff59"} Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.734212 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.738754 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.742150 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.742455 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pmtlh" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.742312 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.762426 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.843445 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-scripts\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.843582 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67b9f\" (UniqueName: \"kubernetes.io/projected/e61209a4-0a5f-4986-8d3b-e22beea4379a-kube-api-access-67b9f\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.843706 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.843782 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-config-data\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.945707 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-scripts\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.945844 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67b9f\" (UniqueName: \"kubernetes.io/projected/e61209a4-0a5f-4986-8d3b-e22beea4379a-kube-api-access-67b9f\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.945953 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.946020 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-config-data\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: 
I1209 15:35:42.953286 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-config-data\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.955278 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-scripts\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.956405 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-combined-ca-bundle\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:42 crc kubenswrapper[4716]: I1209 15:35:42.967167 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67b9f\" (UniqueName: \"kubernetes.io/projected/e61209a4-0a5f-4986-8d3b-e22beea4379a-kube-api-access-67b9f\") pod \"aodh-0\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " pod="openstack/aodh-0" Dec 09 15:35:43 crc kubenswrapper[4716]: I1209 15:35:43.084256 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4df53e56-77ad-4210-96da-60de5f7da2ec","Type":"ContainerStarted","Data":"b121f4c3a4c6d1dbca956750cca146c49c684c1175056bd450ce6ccf11331730"} Dec 09 15:35:43 crc kubenswrapper[4716]: I1209 15:35:43.130137 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 09 15:35:43 crc kubenswrapper[4716]: W1209 15:35:43.766404 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode61209a4_0a5f_4986_8d3b_e22beea4379a.slice/crio-3cdf61429908b93fb20e0cae30674b36dbe729ff7e0e78889baa940858f97153 WatchSource:0}: Error finding container 3cdf61429908b93fb20e0cae30674b36dbe729ff7e0e78889baa940858f97153: Status 404 returned error can't find the container with id 3cdf61429908b93fb20e0cae30674b36dbe729ff7e0e78889baa940858f97153 Dec 09 15:35:43 crc kubenswrapper[4716]: I1209 15:35:43.769442 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 09 15:35:44 crc kubenswrapper[4716]: I1209 15:35:44.101421 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4df53e56-77ad-4210-96da-60de5f7da2ec","Type":"ContainerStarted","Data":"c3657509e6b9f17f36de439ca7a6d4b9ea30c9ef780dd4102b006505401485b3"} Dec 09 15:35:44 crc kubenswrapper[4716]: I1209 15:35:44.105104 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerStarted","Data":"3cdf61429908b93fb20e0cae30674b36dbe729ff7e0e78889baa940858f97153"} Dec 09 15:35:44 crc kubenswrapper[4716]: I1209 15:35:44.125213 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.125187743 podStartE2EDuration="3.125187743s" podCreationTimestamp="2025-12-09 15:35:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:35:44.121240101 +0000 UTC m=+1631.275984089" watchObservedRunningTime="2025-12-09 15:35:44.125187743 
+0000 UTC m=+1631.279931731" Dec 09 15:35:44 crc kubenswrapper[4716]: I1209 15:35:44.162293 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 09 15:35:44 crc kubenswrapper[4716]: I1209 15:35:44.434703 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 15:35:44 crc kubenswrapper[4716]: I1209 15:35:44.435088 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 15:35:45 crc kubenswrapper[4716]: I1209 15:35:45.385405 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 09 15:35:46 crc kubenswrapper[4716]: I1209 15:35:46.087511 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:35:46 crc kubenswrapper[4716]: I1209 15:35:46.088117 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-central-agent" containerID="cri-o://698cdb3b2d1a4cf414fe92f2da9386ee981d7e7931c7deba63d5968dae4245c1" gracePeriod=30 Dec 09 15:35:46 crc kubenswrapper[4716]: I1209 15:35:46.088184 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="proxy-httpd" containerID="cri-o://3e929f1764ee4fd53fe79951d3e1537b5c693234d5ae42832246789f2d70c3c0" gracePeriod=30 Dec 09 15:35:46 crc kubenswrapper[4716]: I1209 15:35:46.088235 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="sg-core" containerID="cri-o://4100ab696b251835939e43d78d26fb8262aefd963c606ff714e2c8c8aa07834a" gracePeriod=30 Dec 09 15:35:46 crc kubenswrapper[4716]: I1209 15:35:46.088283 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-notification-agent" containerID="cri-o://05e191539e5635a752a6bb5bedd814c7143cc4b69f1b2f4ff4623bd45d4ab798" gracePeriod=30 Dec 09 15:35:46 crc kubenswrapper[4716]: I1209 15:35:46.169876 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerStarted","Data":"552515a36bd9262579215f02c970ef4c17108f6264c9a9fb52a174d336b6ebb1"} Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.283414 4716 generic.go:334] "Generic (PLEG): container finished" podID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerID="3e929f1764ee4fd53fe79951d3e1537b5c693234d5ae42832246789f2d70c3c0" exitCode=0 Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.283744 4716 generic.go:334] "Generic (PLEG): container finished" podID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerID="4100ab696b251835939e43d78d26fb8262aefd963c606ff714e2c8c8aa07834a" exitCode=2 Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.283753 4716 generic.go:334] "Generic (PLEG): container finished" podID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerID="698cdb3b2d1a4cf414fe92f2da9386ee981d7e7931c7deba63d5968dae4245c1" exitCode=0 Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.302001 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerDied","Data":"3e929f1764ee4fd53fe79951d3e1537b5c693234d5ae42832246789f2d70c3c0"} Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.302048 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.302079 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerDied","Data":"4100ab696b251835939e43d78d26fb8262aefd963c606ff714e2c8c8aa07834a"} Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.302090 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerDied","Data":"698cdb3b2d1a4cf414fe92f2da9386ee981d7e7931c7deba63d5968dae4245c1"} Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.922338 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:35:47 crc kubenswrapper[4716]: I1209 15:35:47.922715 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:35:48 crc kubenswrapper[4716]: I1209 15:35:48.300993 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerStarted","Data":"45065179f0a8198e9355a29eb0b671792d64a78846103e61f6d7704eb09939c8"} Dec 09 15:35:48 crc kubenswrapper[4716]: I1209 15:35:48.634729 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:35:48 crc kubenswrapper[4716]: I1209 15:35:48.635272 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6" containerName="kube-state-metrics" containerID="cri-o://4964cb161a626512ca09fa63000fe7db72ca7e2703ce041ec0e5040d3fe0dc32" gracePeriod=30 Dec 09 15:35:48 crc kubenswrapper[4716]: I1209 15:35:48.794461 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:35:48 crc kubenswrapper[4716]: I1209 15:35:48.803412 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="54db648f-e02a-4af0-b425-458a51844527" containerName="mysqld-exporter" containerID="cri-o://7b10665d8101b492b30036beb3454a3d4ff99888786d6ffd4bb6a587a8d7d584" gracePeriod=30 Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.164253 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.214000 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.329639 4716 generic.go:334] "Generic (PLEG): container finished" podID="8cd62374-51f5-4445-94f2-3d575475d8e6" containerID="4964cb161a626512ca09fa63000fe7db72ca7e2703ce041ec0e5040d3fe0dc32" exitCode=2 Dec 09 
15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.330153 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8cd62374-51f5-4445-94f2-3d575475d8e6","Type":"ContainerDied","Data":"4964cb161a626512ca09fa63000fe7db72ca7e2703ce041ec0e5040d3fe0dc32"} Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.333402 4716 generic.go:334] "Generic (PLEG): container finished" podID="54db648f-e02a-4af0-b425-458a51844527" containerID="7b10665d8101b492b30036beb3454a3d4ff99888786d6ffd4bb6a587a8d7d584" exitCode=2 Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.333457 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"54db648f-e02a-4af0-b425-458a51844527","Type":"ContainerDied","Data":"7b10665d8101b492b30036beb3454a3d4ff99888786d6ffd4bb6a587a8d7d584"} Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.335722 4716 generic.go:334] "Generic (PLEG): container finished" podID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerID="05e191539e5635a752a6bb5bedd814c7143cc4b69f1b2f4ff4623bd45d4ab798" exitCode=0 Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.337217 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerDied","Data":"05e191539e5635a752a6bb5bedd814c7143cc4b69f1b2f4ff4623bd45d4ab798"} Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.375906 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.435863 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 09 15:35:49 crc kubenswrapper[4716]: I1209 15:35:49.435914 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.492908 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.244:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.492934 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.244:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.697761 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.702945 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.713129 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.827920 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sptnc\" (UniqueName: \"kubernetes.io/projected/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-kube-api-access-sptnc\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828012 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-scripts\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828042 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-sg-core-conf-yaml\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828102 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-config-data\") pod \"54db648f-e02a-4af0-b425-458a51844527\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828166 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-combined-ca-bundle\") pod \"54db648f-e02a-4af0-b425-458a51844527\" (UID: \"54db648f-e02a-4af0-b425-458a51844527\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828278 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-run-httpd\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828371 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wf69k\" (UniqueName: \"kubernetes.io/projected/8cd62374-51f5-4445-94f2-3d575475d8e6-kube-api-access-wf69k\") pod \"8cd62374-51f5-4445-94f2-3d575475d8e6\" (UID: \"8cd62374-51f5-4445-94f2-3d575475d8e6\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828435 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-config-data\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828489 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-log-httpd\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828552 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdrcn\" (UniqueName: \"kubernetes.io/projected/54db648f-e02a-4af0-b425-458a51844527-kube-api-access-sdrcn\") pod \"54db648f-e02a-4af0-b425-458a51844527\" (UID: 
\"54db648f-e02a-4af0-b425-458a51844527\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828782 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-combined-ca-bundle\") pod \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\" (UID: \"45a3fab3-ed88-43ff-96d9-fa02a46b23ec\") " Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.828815 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.830049 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.830597 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.836878 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-scripts" (OuterVolumeSpecName: "scripts") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.838836 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54db648f-e02a-4af0-b425-458a51844527-kube-api-access-sdrcn" (OuterVolumeSpecName: "kube-api-access-sdrcn") pod "54db648f-e02a-4af0-b425-458a51844527" (UID: "54db648f-e02a-4af0-b425-458a51844527"). InnerVolumeSpecName "kube-api-access-sdrcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.838896 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-kube-api-access-sptnc" (OuterVolumeSpecName: "kube-api-access-sptnc") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "kube-api-access-sptnc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.840962 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cd62374-51f5-4445-94f2-3d575475d8e6-kube-api-access-wf69k" (OuterVolumeSpecName: "kube-api-access-wf69k") pod "8cd62374-51f5-4445-94f2-3d575475d8e6" (UID: "8cd62374-51f5-4445-94f2-3d575475d8e6"). InnerVolumeSpecName "kube-api-access-wf69k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.879676 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54db648f-e02a-4af0-b425-458a51844527" (UID: "54db648f-e02a-4af0-b425-458a51844527"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.907849 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.934898 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.935217 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wf69k\" (UniqueName: \"kubernetes.io/projected/8cd62374-51f5-4445-94f2-3d575475d8e6-kube-api-access-wf69k\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.935242 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.935256 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdrcn\" (UniqueName: \"kubernetes.io/projected/54db648f-e02a-4af0-b425-458a51844527-kube-api-access-sdrcn\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.935268 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sptnc\" (UniqueName: \"kubernetes.io/projected/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-kube-api-access-sptnc\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.935282 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.935295 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.952991 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-config-data" (OuterVolumeSpecName: "config-data") pod "54db648f-e02a-4af0-b425-458a51844527" (UID: "54db648f-e02a-4af0-b425-458a51844527"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:50 crc kubenswrapper[4716]: I1209 15:35:50.989542 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.018720 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-config-data" (OuterVolumeSpecName: "config-data") pod "45a3fab3-ed88-43ff-96d9-fa02a46b23ec" (UID: "45a3fab3-ed88-43ff-96d9-fa02a46b23ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.036911 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54db648f-e02a-4af0-b425-458a51844527-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.036946 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.036956 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45a3fab3-ed88-43ff-96d9-fa02a46b23ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.382260 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerStarted","Data":"bd34a449bc798b2aa98b7707223bad7268b2aa94f25d1735eefd6a5d3b835e4c"} Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.389068 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.389070 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"8cd62374-51f5-4445-94f2-3d575475d8e6","Type":"ContainerDied","Data":"29ab2f1b0d1be8769b6cf008d1aa83819b5f56bed4af0977b2eda911013a69c2"} Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.389251 4716 scope.go:117] "RemoveContainer" containerID="4964cb161a626512ca09fa63000fe7db72ca7e2703ce041ec0e5040d3fe0dc32" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.396495 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"54db648f-e02a-4af0-b425-458a51844527","Type":"ContainerDied","Data":"2b53ca95af9149a9f357a5a3671500ec6fcf423490f121045121a7dc2cae4b84"} Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.396507 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.410792 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"45a3fab3-ed88-43ff-96d9-fa02a46b23ec","Type":"ContainerDied","Data":"56a0c5afc69cb6aa2b674b18d5b4409beee80c4a03f15e7d02ad7c319db3ee0f"} Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.410946 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.498050 4716 scope.go:117] "RemoveContainer" containerID="7b10665d8101b492b30036beb3454a3d4ff99888786d6ffd4bb6a587a8d7d584" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.501944 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.503687 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.503726 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.528309 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.547177 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.573637 4716 scope.go:117] "RemoveContainer" containerID="3e929f1764ee4fd53fe79951d3e1537b5c693234d5ae42832246789f2d70c3c0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.588275 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.601717 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: E1209 15:35:51.602308 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-notification-agent" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602327 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-notification-agent" Dec 09 15:35:51 crc kubenswrapper[4716]: E1209 15:35:51.602345 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-central-agent" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602351 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-central-agent" Dec 09 15:35:51 crc kubenswrapper[4716]: E1209 15:35:51.602368 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6" containerName="kube-state-metrics" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602374 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6" containerName="kube-state-metrics" Dec 09 15:35:51 crc kubenswrapper[4716]: E1209 15:35:51.602404 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="sg-core" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602410 4716 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="sg-core" Dec 09 15:35:51 crc kubenswrapper[4716]: E1209 15:35:51.602424 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54db648f-e02a-4af0-b425-458a51844527" containerName="mysqld-exporter" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602430 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="54db648f-e02a-4af0-b425-458a51844527" containerName="mysqld-exporter" Dec 09 15:35:51 crc kubenswrapper[4716]: E1209 15:35:51.602446 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="proxy-httpd" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602452 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="proxy-httpd" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602800 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-central-agent" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602825 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="ceilometer-notification-agent" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602856 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="proxy-httpd" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602869 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6" containerName="kube-state-metrics" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602875 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="54db648f-e02a-4af0-b425-458a51844527" containerName="mysqld-exporter" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.602885 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" containerName="sg-core" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.604287 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.607291 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-jm2b4" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.607543 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.607669 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.621608 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.642645 4716 scope.go:117] "RemoveContainer" containerID="4100ab696b251835939e43d78d26fb8262aefd963c606ff714e2c8c8aa07834a" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.644340 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.655519 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5td74\" (UniqueName: \"kubernetes.io/projected/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-api-access-5td74\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.655588 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.655634 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.655705 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.660686 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.662490 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.667190 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.667389 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.677551 4716 scope.go:117] "RemoveContainer" containerID="05e191539e5635a752a6bb5bedd814c7143cc4b69f1b2f4ff4623bd45d4ab798" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.693117 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.709143 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.711437 4716 scope.go:117] "RemoveContainer" containerID="698cdb3b2d1a4cf414fe92f2da9386ee981d7e7931c7deba63d5968dae4245c1" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.728179 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.731449 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.737179 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.737248 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.737377 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.744055 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.760941 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761091 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-log-httpd\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761126 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761168 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5td74\" (UniqueName: \"kubernetes.io/projected/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-api-access-5td74\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " 
pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761208 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761234 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761281 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzjc8\" (UniqueName: \"kubernetes.io/projected/bae6c605-c542-46f5-9cb2-da55a12b83c0-kube-api-access-mzjc8\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761317 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-run-httpd\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761357 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761399 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-scripts\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761419 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761455 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-config-data\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761495 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761610 4716 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-config-data\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761656 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.761703 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4h5s\" (UniqueName: \"kubernetes.io/projected/457d6079-52ef-43f8-8409-6e562a6e8f85-kube-api-access-b4h5s\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.769324 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.771142 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.793640 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5td74\" (UniqueName: \"kubernetes.io/projected/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-api-access-5td74\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.798110 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0d83e13f-52c5-4369-b2bc-ecb9df8b4baf-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf\") " pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.869238 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-log-httpd\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.869389 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.870654 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-log-httpd\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.873440 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzjc8\" (UniqueName: \"kubernetes.io/projected/bae6c605-c542-46f5-9cb2-da55a12b83c0-kube-api-access-mzjc8\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.873548 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-run-httpd\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.873693 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-scripts\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.873734 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.873775 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-config-data\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.873883 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.874139 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-config-data\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.874171 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.874295 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-run-httpd\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.874260 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4h5s\" (UniqueName: 
\"kubernetes.io/projected/457d6079-52ef-43f8-8409-6e562a6e8f85-kube-api-access-b4h5s\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.874444 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.875953 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.880013 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-scripts\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.880042 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.881556 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.881882 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-config-data\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.884045 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.891713 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-config-data\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.892496 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/457d6079-52ef-43f8-8409-6e562a6e8f85-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.893207 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mzjc8\" (UniqueName: \"kubernetes.io/projected/bae6c605-c542-46f5-9cb2-da55a12b83c0-kube-api-access-mzjc8\") pod \"ceilometer-0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " pod="openstack/ceilometer-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.896551 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4h5s\" (UniqueName: \"kubernetes.io/projected/457d6079-52ef-43f8-8409-6e562a6e8f85-kube-api-access-b4h5s\") pod \"mysqld-exporter-0\" (UID: \"457d6079-52ef-43f8-8409-6e562a6e8f85\") " pod="openstack/mysqld-exporter-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.945937 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 09 15:35:51 crc kubenswrapper[4716]: I1209 15:35:51.995572 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 09 15:35:52 crc kubenswrapper[4716]: I1209 15:35:52.071093 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:35:52 crc kubenswrapper[4716]: I1209 15:35:52.539509 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 09 15:35:52 crc kubenswrapper[4716]: I1209 15:35:52.587891 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:35:52 crc kubenswrapper[4716]: I1209 15:35:52.588277 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.245:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:35:52 crc kubenswrapper[4716]: I1209 15:35:52.699477 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 09 15:35:52 crc kubenswrapper[4716]: W1209 15:35:52.703237 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod457d6079_52ef_43f8_8409_6e562a6e8f85.slice/crio-fb948001e71b4d320a0fb65be01e18445db4a05f4d4f62a4a1f1517b12e4095f WatchSource:0}: Error finding container fb948001e71b4d320a0fb65be01e18445db4a05f4d4f62a4a1f1517b12e4095f: Status 404 returned error can't find the container with id fb948001e71b4d320a0fb65be01e18445db4a05f4d4f62a4a1f1517b12e4095f Dec 09 15:35:52 crc kubenswrapper[4716]: W1209 15:35:52.817516 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbae6c605_c542_46f5_9cb2_da55a12b83c0.slice/crio-16ed3ecf702991d0670620c5834900665e337b01d827f07b874349d7ca193334 WatchSource:0}: Error finding container 16ed3ecf702991d0670620c5834900665e337b01d827f07b874349d7ca193334: Status 404 returned error can't find the container with id 16ed3ecf702991d0670620c5834900665e337b01d827f07b874349d7ca193334 Dec 09 15:35:52 crc kubenswrapper[4716]: I1209 15:35:52.821040 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:35:53 crc kubenswrapper[4716]: I1209 15:35:53.232198 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45a3fab3-ed88-43ff-96d9-fa02a46b23ec" 
path="/var/lib/kubelet/pods/45a3fab3-ed88-43ff-96d9-fa02a46b23ec/volumes" Dec 09 15:35:53 crc kubenswrapper[4716]: I1209 15:35:53.233460 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54db648f-e02a-4af0-b425-458a51844527" path="/var/lib/kubelet/pods/54db648f-e02a-4af0-b425-458a51844527/volumes" Dec 09 15:35:53 crc kubenswrapper[4716]: I1209 15:35:53.234377 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cd62374-51f5-4445-94f2-3d575475d8e6" path="/var/lib/kubelet/pods/8cd62374-51f5-4445-94f2-3d575475d8e6/volumes" Dec 09 15:35:53 crc kubenswrapper[4716]: I1209 15:35:53.468932 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"457d6079-52ef-43f8-8409-6e562a6e8f85","Type":"ContainerStarted","Data":"fb948001e71b4d320a0fb65be01e18445db4a05f4d4f62a4a1f1517b12e4095f"} Dec 09 15:35:53 crc kubenswrapper[4716]: I1209 15:35:53.472091 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerStarted","Data":"16ed3ecf702991d0670620c5834900665e337b01d827f07b874349d7ca193334"} Dec 09 15:35:53 crc kubenswrapper[4716]: I1209 15:35:53.475501 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf","Type":"ContainerStarted","Data":"c49fcb8cf2532b78e2cdfa1cac9453dcb0b25afee0fe5eb55b3906a910699100"} Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.494281 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-api" containerID="cri-o://552515a36bd9262579215f02c970ef4c17108f6264c9a9fb52a174d336b6ebb1" gracePeriod=30 Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.494285 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-notifier" containerID="cri-o://bd34a449bc798b2aa98b7707223bad7268b2aa94f25d1735eefd6a5d3b835e4c" gracePeriod=30 Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.494320 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-listener" containerID="cri-o://c21055fe884bad569cc527d99cd0f5391a4494ee044f5cba4c1c1d4d34184db4" gracePeriod=30 Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.494368 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-evaluator" containerID="cri-o://45065179f0a8198e9355a29eb0b671792d64a78846103e61f6d7704eb09939c8" gracePeriod=30 Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.494229 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerStarted","Data":"c21055fe884bad569cc527d99cd0f5391a4494ee044f5cba4c1c1d4d34184db4"} Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.503254 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0d83e13f-52c5-4369-b2bc-ecb9df8b4baf","Type":"ContainerStarted","Data":"504eefccab532f5fe531a9438de1be0039b8ec7e35b9889b2b0f019219ad924e"} Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.505047 4716 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.515356 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"457d6079-52ef-43f8-8409-6e562a6e8f85","Type":"ContainerStarted","Data":"7a6cd10c32b7cd43f8119b97a8bf381c7e66d832933a879832012b880f4fff0d"} Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.528637 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.699633077 podStartE2EDuration="12.528604743s" podCreationTimestamp="2025-12-09 15:35:42 +0000 UTC" firstStartedPulling="2025-12-09 15:35:43.7728014 +0000 UTC m=+1630.927545388" lastFinishedPulling="2025-12-09 15:35:53.601773066 +0000 UTC m=+1640.756517054" observedRunningTime="2025-12-09 15:35:54.514431831 +0000 UTC m=+1641.669175829" watchObservedRunningTime="2025-12-09 15:35:54.528604743 +0000 UTC m=+1641.683348721" Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.533954 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerStarted","Data":"47fda1554bf615d49748aac67dee9af91dd9f460d956155d4ca9d6ea990d9a6a"} Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.579001 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.673846129 podStartE2EDuration="3.57897369s" podCreationTimestamp="2025-12-09 15:35:51 +0000 UTC" firstStartedPulling="2025-12-09 15:35:52.546509292 +0000 UTC m=+1639.701253280" lastFinishedPulling="2025-12-09 15:35:53.451636853 +0000 UTC m=+1640.606380841" observedRunningTime="2025-12-09 15:35:54.546608493 +0000 UTC m=+1641.701352491" watchObservedRunningTime="2025-12-09 15:35:54.57897369 +0000 UTC m=+1641.733717678" Dec 09 15:35:54 crc kubenswrapper[4716]: I1209 15:35:54.607038 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.6816014580000003 podStartE2EDuration="3.607011864s" podCreationTimestamp="2025-12-09 15:35:51 +0000 UTC" firstStartedPulling="2025-12-09 15:35:52.704914709 +0000 UTC m=+1639.859658697" lastFinishedPulling="2025-12-09 15:35:53.630325115 +0000 UTC m=+1640.785069103" observedRunningTime="2025-12-09 15:35:54.572254909 +0000 UTC m=+1641.726998897" watchObservedRunningTime="2025-12-09 15:35:54.607011864 +0000 UTC m=+1641.761755852" Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.554715 4716 generic.go:334] "Generic (PLEG): container finished" podID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerID="bd34a449bc798b2aa98b7707223bad7268b2aa94f25d1735eefd6a5d3b835e4c" exitCode=0 Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.555345 4716 generic.go:334] "Generic (PLEG): container finished" podID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerID="45065179f0a8198e9355a29eb0b671792d64a78846103e61f6d7704eb09939c8" exitCode=0 Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.555359 4716 generic.go:334] "Generic (PLEG): container finished" podID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerID="552515a36bd9262579215f02c970ef4c17108f6264c9a9fb52a174d336b6ebb1" exitCode=0 Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.554767 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerDied","Data":"bd34a449bc798b2aa98b7707223bad7268b2aa94f25d1735eefd6a5d3b835e4c"} Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.555451 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerDied","Data":"45065179f0a8198e9355a29eb0b671792d64a78846103e61f6d7704eb09939c8"} Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.555467 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerDied","Data":"552515a36bd9262579215f02c970ef4c17108f6264c9a9fb52a174d336b6ebb1"} Dec 09 15:35:55 crc kubenswrapper[4716]: I1209 15:35:55.559555 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerStarted","Data":"7adbdce18f773ca2727f74a3df3f86335ecce4a369342860051c15dd045953fc"} Dec 09 15:35:56 crc kubenswrapper[4716]: I1209 15:35:56.576916 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerStarted","Data":"2a8b46615505f25d9a89932f02a9b500f2505363842f5b1013d7ee5515573188"} Dec 09 15:35:58 crc kubenswrapper[4716]: I1209 15:35:58.600217 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerStarted","Data":"77fcd353f483e570094bc2ede82bca5db618c073a51686815a64e14beb3a968f"} Dec 09 15:35:58 crc kubenswrapper[4716]: I1209 15:35:58.601861 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:35:58 crc kubenswrapper[4716]: I1209 15:35:58.631078 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.627123203 podStartE2EDuration="7.631036561s" podCreationTimestamp="2025-12-09 15:35:51 +0000 UTC" firstStartedPulling="2025-12-09 15:35:52.824504286 +0000 UTC m=+1639.979248274" lastFinishedPulling="2025-12-09 15:35:57.828417644 +0000 UTC m=+1644.983161632" observedRunningTime="2025-12-09 15:35:58.622391486 +0000 UTC m=+1645.777135474" watchObservedRunningTime="2025-12-09 15:35:58.631036561 +0000 UTC m=+1645.785780549" Dec 09 15:35:59 crc kubenswrapper[4716]: I1209 15:35:59.451029 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 09 15:35:59 crc kubenswrapper[4716]: I1209 15:35:59.456206 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 09 15:35:59 crc kubenswrapper[4716]: I1209 15:35:59.457785 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 09 15:35:59 crc kubenswrapper[4716]: I1209 15:35:59.617147 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 09 15:36:01 crc kubenswrapper[4716]: I1209 15:36:01.509179 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 09 15:36:01 crc kubenswrapper[4716]: I1209 15:36:01.510645 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 09 15:36:01 crc kubenswrapper[4716]: I1209 15:36:01.510712 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-api-0" Dec 09 15:36:01 crc kubenswrapper[4716]: I1209 15:36:01.512938 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 09 15:36:01 crc kubenswrapper[4716]: I1209 15:36:01.635780 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 09 15:36:01 crc kubenswrapper[4716]: I1209 15:36:01.647269 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:01.972288 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-tngkz"] Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:01.976611 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.020697 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-tngkz"] Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.083160 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.083226 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.083357 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.083402 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-config\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.083430 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.083462 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p7hk\" (UniqueName: \"kubernetes.io/projected/ef6af67b-56b5-45e7-8811-3ccf3ea02613-kube-api-access-4p7hk\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.101894 4716 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.185931 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p7hk\" (UniqueName: \"kubernetes.io/projected/ef6af67b-56b5-45e7-8811-3ccf3ea02613-kube-api-access-4p7hk\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.185990 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.186045 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.186203 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.186248 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-config\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.186275 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.188276 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-svc\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.188366 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.188903 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-config\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc 
kubenswrapper[4716]: I1209 15:36:02.188965 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.189456 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.246957 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p7hk\" (UniqueName: \"kubernetes.io/projected/ef6af67b-56b5-45e7-8811-3ccf3ea02613-kube-api-access-4p7hk\") pod \"dnsmasq-dns-6b7bbf7cf9-tngkz\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.432179 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.684860 4716 generic.go:334] "Generic (PLEG): container finished" podID="c0b4428b-bbd0-4a02-8823-47813ec73b24" containerID="c9cdf8bdd05218dcae4dd7bd9ee702ea5aeb7ee7321ef5baa615f8de34603028" exitCode=137 Dec 09 15:36:02 crc kubenswrapper[4716]: I1209 15:36:02.685116 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c0b4428b-bbd0-4a02-8823-47813ec73b24","Type":"ContainerDied","Data":"c9cdf8bdd05218dcae4dd7bd9ee702ea5aeb7ee7321ef5baa615f8de34603028"} Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.015244 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.118807 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-combined-ca-bundle\") pod \"c0b4428b-bbd0-4a02-8823-47813ec73b24\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.118940 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgk88\" (UniqueName: \"kubernetes.io/projected/c0b4428b-bbd0-4a02-8823-47813ec73b24-kube-api-access-cgk88\") pod \"c0b4428b-bbd0-4a02-8823-47813ec73b24\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.119312 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-config-data\") pod \"c0b4428b-bbd0-4a02-8823-47813ec73b24\" (UID: \"c0b4428b-bbd0-4a02-8823-47813ec73b24\") " Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.138971 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0b4428b-bbd0-4a02-8823-47813ec73b24-kube-api-access-cgk88" (OuterVolumeSpecName: "kube-api-access-cgk88") pod "c0b4428b-bbd0-4a02-8823-47813ec73b24" (UID: "c0b4428b-bbd0-4a02-8823-47813ec73b24"). 
InnerVolumeSpecName "kube-api-access-cgk88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.231524 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgk88\" (UniqueName: \"kubernetes.io/projected/c0b4428b-bbd0-4a02-8823-47813ec73b24-kube-api-access-cgk88\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.251240 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-config-data" (OuterVolumeSpecName: "config-data") pod "c0b4428b-bbd0-4a02-8823-47813ec73b24" (UID: "c0b4428b-bbd0-4a02-8823-47813ec73b24"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.269469 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0b4428b-bbd0-4a02-8823-47813ec73b24" (UID: "c0b4428b-bbd0-4a02-8823-47813ec73b24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.307235 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-tngkz"] Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.339278 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.339324 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b4428b-bbd0-4a02-8823-47813ec73b24-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.707552 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c0b4428b-bbd0-4a02-8823-47813ec73b24","Type":"ContainerDied","Data":"7afe654877063a2699c944467fa9b60b18e135c509a7c1c08119530306202b21"} Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.707613 4716 scope.go:117] "RemoveContainer" containerID="c9cdf8bdd05218dcae4dd7bd9ee702ea5aeb7ee7321ef5baa615f8de34603028" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.707558 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.717871 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" event={"ID":"ef6af67b-56b5-45e7-8811-3ccf3ea02613","Type":"ContainerStarted","Data":"3fd51d0e4508bfba720c7770bc6625dcaa3c85f3d97fff746c8db2339ec160ad"} Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.718204 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" event={"ID":"ef6af67b-56b5-45e7-8811-3ccf3ea02613","Type":"ContainerStarted","Data":"23c909c88ef8a7b046c289c1e23b40462e32194c02a73dc15baf5613d7083301"} Dec 09 15:36:03 crc kubenswrapper[4716]: I1209 15:36:03.986369 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.001548 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.025942 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:36:04 crc kubenswrapper[4716]: E1209 15:36:04.028662 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0b4428b-bbd0-4a02-8823-47813ec73b24" containerName="nova-cell1-novncproxy-novncproxy" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.028697 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0b4428b-bbd0-4a02-8823-47813ec73b24" containerName="nova-cell1-novncproxy-novncproxy" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.031436 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0b4428b-bbd0-4a02-8823-47813ec73b24" containerName="nova-cell1-novncproxy-novncproxy" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.055064 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.059447 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.059735 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.060001 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.124798 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.172053 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.172156 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rchpf\" (UniqueName: \"kubernetes.io/projected/7ad0a53e-9434-4af3-bfdc-b7a145293bec-kube-api-access-rchpf\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.172200 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.172269 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.172390 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.278804 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.278987 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rchpf\" (UniqueName: \"kubernetes.io/projected/7ad0a53e-9434-4af3-bfdc-b7a145293bec-kube-api-access-rchpf\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " 
pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.279066 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.279164 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.279337 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.284700 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.285376 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.288620 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.306549 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rchpf\" (UniqueName: \"kubernetes.io/projected/7ad0a53e-9434-4af3-bfdc-b7a145293bec-kube-api-access-rchpf\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.314855 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ad0a53e-9434-4af3-bfdc-b7a145293bec-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7ad0a53e-9434-4af3-bfdc-b7a145293bec\") " pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.398079 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.747840 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerID="3fd51d0e4508bfba720c7770bc6625dcaa3c85f3d97fff746c8db2339ec160ad" exitCode=0 Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.749761 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" event={"ID":"ef6af67b-56b5-45e7-8811-3ccf3ea02613","Type":"ContainerDied","Data":"3fd51d0e4508bfba720c7770bc6625dcaa3c85f3d97fff746c8db2339ec160ad"} Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.749914 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" event={"ID":"ef6af67b-56b5-45e7-8811-3ccf3ea02613","Type":"ContainerStarted","Data":"b1ddc6c27910cc1411c865b8b2c0e8b0f224d5df32b9ab97972b290f8f266983"} Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.751751 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.779715 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" podStartSLOduration=3.779685577 podStartE2EDuration="3.779685577s" podCreationTimestamp="2025-12-09 15:36:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:04.77554234 +0000 UTC m=+1651.930286328" watchObservedRunningTime="2025-12-09 15:36:04.779685577 +0000 UTC m=+1651.934429575" Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.833962 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.834207 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-log" containerID="cri-o://b121f4c3a4c6d1dbca956750cca146c49c684c1175056bd450ce6ccf11331730" gracePeriod=30 Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.834751 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-api" containerID="cri-o://c3657509e6b9f17f36de439ca7a6d4b9ea30c9ef780dd4102b006505401485b3" gracePeriod=30 Dec 09 15:36:04 crc kubenswrapper[4716]: I1209 15:36:04.952412 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 09 15:36:04 crc kubenswrapper[4716]: W1209 15:36:04.976074 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ad0a53e_9434_4af3_bfdc_b7a145293bec.slice/crio-922eca7153c6f7fb43c17ec2aab5fd6c2a6e15b4de4695635cb601aa9c8cb884 WatchSource:0}: Error finding container 922eca7153c6f7fb43c17ec2aab5fd6c2a6e15b4de4695635cb601aa9c8cb884: Status 404 returned error can't find the container with id 922eca7153c6f7fb43c17ec2aab5fd6c2a6e15b4de4695635cb601aa9c8cb884 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.240216 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0b4428b-bbd0-4a02-8823-47813ec73b24" path="/var/lib/kubelet/pods/c0b4428b-bbd0-4a02-8823-47813ec73b24/volumes" Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.294035 4716 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.294415 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-central-agent" containerID="cri-o://47fda1554bf615d49748aac67dee9af91dd9f460d956155d4ca9d6ea990d9a6a" gracePeriod=30 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.294508 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="proxy-httpd" containerID="cri-o://77fcd353f483e570094bc2ede82bca5db618c073a51686815a64e14beb3a968f" gracePeriod=30 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.294508 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="sg-core" containerID="cri-o://2a8b46615505f25d9a89932f02a9b500f2505363842f5b1013d7ee5515573188" gracePeriod=30 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.294966 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-notification-agent" containerID="cri-o://7adbdce18f773ca2727f74a3df3f86335ecce4a369342860051c15dd045953fc" gracePeriod=30 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.766094 4716 generic.go:334] "Generic (PLEG): container finished" podID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerID="b121f4c3a4c6d1dbca956750cca146c49c684c1175056bd450ce6ccf11331730" exitCode=143 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.766201 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4df53e56-77ad-4210-96da-60de5f7da2ec","Type":"ContainerDied","Data":"b121f4c3a4c6d1dbca956750cca146c49c684c1175056bd450ce6ccf11331730"} Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.770009 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7ad0a53e-9434-4af3-bfdc-b7a145293bec","Type":"ContainerStarted","Data":"d1a997a8f1e9d8d39ac5630cbdfe2d1bd4f9fbd74e64d2f7f0f2dff57ef04b1e"} Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.770052 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7ad0a53e-9434-4af3-bfdc-b7a145293bec","Type":"ContainerStarted","Data":"922eca7153c6f7fb43c17ec2aab5fd6c2a6e15b4de4695635cb601aa9c8cb884"} Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.773613 4716 generic.go:334] "Generic (PLEG): container finished" podID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerID="77fcd353f483e570094bc2ede82bca5db618c073a51686815a64e14beb3a968f" exitCode=0 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.773662 4716 generic.go:334] "Generic (PLEG): container finished" podID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerID="2a8b46615505f25d9a89932f02a9b500f2505363842f5b1013d7ee5515573188" exitCode=2 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.773670 4716 generic.go:334] "Generic (PLEG): container finished" podID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerID="47fda1554bf615d49748aac67dee9af91dd9f460d956155d4ca9d6ea990d9a6a" exitCode=0 Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.773706 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerDied","Data":"77fcd353f483e570094bc2ede82bca5db618c073a51686815a64e14beb3a968f"} Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.773747 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerDied","Data":"2a8b46615505f25d9a89932f02a9b500f2505363842f5b1013d7ee5515573188"} Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.773765 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerDied","Data":"47fda1554bf615d49748aac67dee9af91dd9f460d956155d4ca9d6ea990d9a6a"} Dec 09 15:36:05 crc kubenswrapper[4716]: I1209 15:36:05.796617 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.796597066 podStartE2EDuration="2.796597066s" podCreationTimestamp="2025-12-09 15:36:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:05.789007001 +0000 UTC m=+1652.943750989" watchObservedRunningTime="2025-12-09 15:36:05.796597066 +0000 UTC m=+1652.951341054" Dec 09 15:36:06 crc kubenswrapper[4716]: I1209 15:36:06.793264 4716 generic.go:334] "Generic (PLEG): container finished" podID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerID="7adbdce18f773ca2727f74a3df3f86335ecce4a369342860051c15dd045953fc" exitCode=0 Dec 09 15:36:06 crc kubenswrapper[4716]: I1209 15:36:06.793705 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerDied","Data":"7adbdce18f773ca2727f74a3df3f86335ecce4a369342860051c15dd045953fc"} Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.044300 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.069770 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-sg-core-conf-yaml\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070000 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-config-data\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070225 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzjc8\" (UniqueName: \"kubernetes.io/projected/bae6c605-c542-46f5-9cb2-da55a12b83c0-kube-api-access-mzjc8\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070284 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-log-httpd\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070347 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-scripts\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070434 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-ceilometer-tls-certs\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070475 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-run-httpd\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.070531 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-combined-ca-bundle\") pod \"bae6c605-c542-46f5-9cb2-da55a12b83c0\" (UID: \"bae6c605-c542-46f5-9cb2-da55a12b83c0\") " Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.073981 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.077989 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.092515 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.102549 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bae6c605-c542-46f5-9cb2-da55a12b83c0-kube-api-access-mzjc8" (OuterVolumeSpecName: "kube-api-access-mzjc8") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "kube-api-access-mzjc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.104718 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-scripts" (OuterVolumeSpecName: "scripts") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.134924 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.180730 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.180765 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.180776 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzjc8\" (UniqueName: \"kubernetes.io/projected/bae6c605-c542-46f5-9cb2-da55a12b83c0-kube-api-access-mzjc8\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.180785 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bae6c605-c542-46f5-9cb2-da55a12b83c0-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.186092 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.229157 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.277730 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-config-data" (OuterVolumeSpecName: "config-data") pod "bae6c605-c542-46f5-9cb2-da55a12b83c0" (UID: "bae6c605-c542-46f5-9cb2-da55a12b83c0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.284779 4716 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.284812 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.284824 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae6c605-c542-46f5-9cb2-da55a12b83c0-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.815374 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bae6c605-c542-46f5-9cb2-da55a12b83c0","Type":"ContainerDied","Data":"16ed3ecf702991d0670620c5834900665e337b01d827f07b874349d7ca193334"} Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.815459 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.815466 4716 scope.go:117] "RemoveContainer" containerID="77fcd353f483e570094bc2ede82bca5db618c073a51686815a64e14beb3a968f" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.852379 4716 scope.go:117] "RemoveContainer" containerID="2a8b46615505f25d9a89932f02a9b500f2505363842f5b1013d7ee5515573188" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.866603 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.904681 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.906914 4716 scope.go:117] "RemoveContainer" containerID="7adbdce18f773ca2727f74a3df3f86335ecce4a369342860051c15dd045953fc" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.917532 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:36:07 crc kubenswrapper[4716]: E1209 15:36:07.918243 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="sg-core" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.918273 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="sg-core" Dec 09 15:36:07 crc kubenswrapper[4716]: E1209 15:36:07.918321 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="proxy-httpd" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.918338 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="proxy-httpd" Dec 09 15:36:07 crc kubenswrapper[4716]: E1209 15:36:07.918391 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-central-agent" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.918407 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-central-agent" Dec 09 15:36:07 crc kubenswrapper[4716]: E1209 15:36:07.918465 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-notification-agent" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.918475 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-notification-agent" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.919268 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="proxy-httpd" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.919314 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="sg-core" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.919351 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-central-agent" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.919381 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" containerName="ceilometer-notification-agent" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.928247 4716 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.929910 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.939663 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.939964 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.940344 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:36:07 crc kubenswrapper[4716]: I1209 15:36:07.986491 4716 scope.go:117] "RemoveContainer" containerID="47fda1554bf615d49748aac67dee9af91dd9f460d956155d4ca9d6ea990d9a6a" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008260 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-scripts\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008303 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-log-httpd\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008413 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq29d\" (UniqueName: \"kubernetes.io/projected/890bfa10-578a-4a64-8829-b2898dd4e02a-kube-api-access-kq29d\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008462 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008484 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008505 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008520 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-run-httpd\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 
15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.008600 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-config-data\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.110835 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-config-data\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.111155 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-scripts\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.111227 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-log-httpd\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.111347 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq29d\" (UniqueName: \"kubernetes.io/projected/890bfa10-578a-4a64-8829-b2898dd4e02a-kube-api-access-kq29d\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.111438 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.111508 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.111581 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.112092 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-run-httpd\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.112517 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-run-httpd\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 
15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.112551 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-log-httpd\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.115893 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-config-data\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.115996 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.116461 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.125743 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.127187 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-scripts\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.130195 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq29d\" (UniqueName: \"kubernetes.io/projected/890bfa10-578a-4a64-8829-b2898dd4e02a-kube-api-access-kq29d\") pod \"ceilometer-0\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.268525 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.820779 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.831033 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerStarted","Data":"8587d3978013827c7d9ad87e662c364055e613accf32ddd4466a156dc9757d3d"} Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.833091 4716 generic.go:334] "Generic (PLEG): container finished" podID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerID="c3657509e6b9f17f36de439ca7a6d4b9ea30c9ef780dd4102b006505401485b3" exitCode=0 Dec 09 15:36:08 crc kubenswrapper[4716]: I1209 15:36:08.833137 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4df53e56-77ad-4210-96da-60de5f7da2ec","Type":"ContainerDied","Data":"c3657509e6b9f17f36de439ca7a6d4b9ea30c9ef780dd4102b006505401485b3"} Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.232815 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bae6c605-c542-46f5-9cb2-da55a12b83c0" path="/var/lib/kubelet/pods/bae6c605-c542-46f5-9cb2-da55a12b83c0/volumes" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.240138 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.357562 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-combined-ca-bundle\") pod \"4df53e56-77ad-4210-96da-60de5f7da2ec\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.357705 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-799z8\" (UniqueName: \"kubernetes.io/projected/4df53e56-77ad-4210-96da-60de5f7da2ec-kube-api-access-799z8\") pod \"4df53e56-77ad-4210-96da-60de5f7da2ec\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.357768 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-config-data\") pod \"4df53e56-77ad-4210-96da-60de5f7da2ec\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.358483 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4df53e56-77ad-4210-96da-60de5f7da2ec-logs\") pod \"4df53e56-77ad-4210-96da-60de5f7da2ec\" (UID: \"4df53e56-77ad-4210-96da-60de5f7da2ec\") " Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.359973 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4df53e56-77ad-4210-96da-60de5f7da2ec-logs" (OuterVolumeSpecName: "logs") pod "4df53e56-77ad-4210-96da-60de5f7da2ec" (UID: "4df53e56-77ad-4210-96da-60de5f7da2ec"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.382392 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4df53e56-77ad-4210-96da-60de5f7da2ec-kube-api-access-799z8" (OuterVolumeSpecName: "kube-api-access-799z8") pod "4df53e56-77ad-4210-96da-60de5f7da2ec" (UID: "4df53e56-77ad-4210-96da-60de5f7da2ec"). InnerVolumeSpecName "kube-api-access-799z8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.399294 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.412909 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-config-data" (OuterVolumeSpecName: "config-data") pod "4df53e56-77ad-4210-96da-60de5f7da2ec" (UID: "4df53e56-77ad-4210-96da-60de5f7da2ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.418878 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4df53e56-77ad-4210-96da-60de5f7da2ec" (UID: "4df53e56-77ad-4210-96da-60de5f7da2ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.460963 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.461003 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-799z8\" (UniqueName: \"kubernetes.io/projected/4df53e56-77ad-4210-96da-60de5f7da2ec-kube-api-access-799z8\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.461019 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df53e56-77ad-4210-96da-60de5f7da2ec-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.461031 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4df53e56-77ad-4210-96da-60de5f7da2ec-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.849418 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4df53e56-77ad-4210-96da-60de5f7da2ec","Type":"ContainerDied","Data":"c79aa102422f8ef8a39562628b3e5d14086cca56aa514b1fd639c9fa1007ff59"} Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.849472 4716 scope.go:117] "RemoveContainer" containerID="c3657509e6b9f17f36de439ca7a6d4b9ea30c9ef780dd4102b006505401485b3" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.849518 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.880863 4716 scope.go:117] "RemoveContainer" containerID="b121f4c3a4c6d1dbca956750cca146c49c684c1175056bd450ce6ccf11331730" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.891395 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.912428 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.929795 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:09 crc kubenswrapper[4716]: E1209 15:36:09.930768 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-api" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.930890 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-api" Dec 09 15:36:09 crc kubenswrapper[4716]: E1209 15:36:09.931001 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-log" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.931086 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-log" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.931449 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-log" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.931567 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" containerName="nova-api-api" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.933144 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.937516 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.937597 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.944051 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:09 crc kubenswrapper[4716]: I1209 15:36:09.944177 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.076127 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-config-data\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.076259 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf841ea1-b016-46df-ba7a-8d5c97748eca-logs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.076287 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.076307 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-public-tls-certs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.076344 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.076474 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8whzb\" (UniqueName: \"kubernetes.io/projected/bf841ea1-b016-46df-ba7a-8d5c97748eca-kube-api-access-8whzb\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.178573 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8whzb\" (UniqueName: \"kubernetes.io/projected/bf841ea1-b016-46df-ba7a-8d5c97748eca-kube-api-access-8whzb\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.178642 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-config-data\") pod 
\"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.178739 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf841ea1-b016-46df-ba7a-8d5c97748eca-logs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.178758 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.178778 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-public-tls-certs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.178818 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.180090 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf841ea1-b016-46df-ba7a-8d5c97748eca-logs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.187350 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.187431 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.193135 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-public-tls-certs\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.193341 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-config-data\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.199826 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8whzb\" (UniqueName: \"kubernetes.io/projected/bf841ea1-b016-46df-ba7a-8d5c97748eca-kube-api-access-8whzb\") pod \"nova-api-0\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " 
pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.259253 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.731082 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:10 crc kubenswrapper[4716]: W1209 15:36:10.741345 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf841ea1_b016_46df_ba7a_8d5c97748eca.slice/crio-b0ec7edd1994dfb343d01f1fbc8e012d3aa86ac349a7908aede5ee161fb6e35b WatchSource:0}: Error finding container b0ec7edd1994dfb343d01f1fbc8e012d3aa86ac349a7908aede5ee161fb6e35b: Status 404 returned error can't find the container with id b0ec7edd1994dfb343d01f1fbc8e012d3aa86ac349a7908aede5ee161fb6e35b Dec 09 15:36:10 crc kubenswrapper[4716]: I1209 15:36:10.873936 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf841ea1-b016-46df-ba7a-8d5c97748eca","Type":"ContainerStarted","Data":"b0ec7edd1994dfb343d01f1fbc8e012d3aa86ac349a7908aede5ee161fb6e35b"} Dec 09 15:36:11 crc kubenswrapper[4716]: I1209 15:36:11.227479 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4df53e56-77ad-4210-96da-60de5f7da2ec" path="/var/lib/kubelet/pods/4df53e56-77ad-4210-96da-60de5f7da2ec/volumes" Dec 09 15:36:11 crc kubenswrapper[4716]: I1209 15:36:11.890026 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf841ea1-b016-46df-ba7a-8d5c97748eca","Type":"ContainerStarted","Data":"1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279"} Dec 09 15:36:11 crc kubenswrapper[4716]: I1209 15:36:11.893229 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerStarted","Data":"35256f2291ecc9c70fbd323d34d8e7201c8013357ceeea56685c750b50e6ba33"} Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.434829 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.544556 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-6ccd6"] Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.545604 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" containerName="dnsmasq-dns" containerID="cri-o://94e4d56923bc1f77d9ab2429df23c74d42758061a35a2ababf498a3bb464ef50" gracePeriod=10 Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.914470 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerStarted","Data":"10cbf7908a1b4272450e9e0370647ee92ba9c3d4f24bfafa1a7f1d89b2d0b2dc"} Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.917109 4716 generic.go:334] "Generic (PLEG): container finished" podID="e84c448d-0f33-44b9-a68d-5451111935f2" containerID="94e4d56923bc1f77d9ab2429df23c74d42758061a35a2ababf498a3bb464ef50" exitCode=0 Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.917219 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" 
event={"ID":"e84c448d-0f33-44b9-a68d-5451111935f2","Type":"ContainerDied","Data":"94e4d56923bc1f77d9ab2429df23c74d42758061a35a2ababf498a3bb464ef50"} Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.920721 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf841ea1-b016-46df-ba7a-8d5c97748eca","Type":"ContainerStarted","Data":"980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f"} Dec 09 15:36:12 crc kubenswrapper[4716]: I1209 15:36:12.972866 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.972837842 podStartE2EDuration="3.972837842s" podCreationTimestamp="2025-12-09 15:36:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:12.940661 +0000 UTC m=+1660.095404988" watchObservedRunningTime="2025-12-09 15:36:12.972837842 +0000 UTC m=+1660.127581830" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.264450 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.374025 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-config\") pod \"e84c448d-0f33-44b9-a68d-5451111935f2\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.374415 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-swift-storage-0\") pod \"e84c448d-0f33-44b9-a68d-5451111935f2\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.374532 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-nb\") pod \"e84c448d-0f33-44b9-a68d-5451111935f2\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.374779 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-svc\") pod \"e84c448d-0f33-44b9-a68d-5451111935f2\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.374883 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-sb\") pod \"e84c448d-0f33-44b9-a68d-5451111935f2\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.375015 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q86n7\" (UniqueName: \"kubernetes.io/projected/e84c448d-0f33-44b9-a68d-5451111935f2-kube-api-access-q86n7\") pod \"e84c448d-0f33-44b9-a68d-5451111935f2\" (UID: \"e84c448d-0f33-44b9-a68d-5451111935f2\") " Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.386857 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e84c448d-0f33-44b9-a68d-5451111935f2-kube-api-access-q86n7" (OuterVolumeSpecName: 
"kube-api-access-q86n7") pod "e84c448d-0f33-44b9-a68d-5451111935f2" (UID: "e84c448d-0f33-44b9-a68d-5451111935f2"). InnerVolumeSpecName "kube-api-access-q86n7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.478104 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q86n7\" (UniqueName: \"kubernetes.io/projected/e84c448d-0f33-44b9-a68d-5451111935f2-kube-api-access-q86n7\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.479662 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e84c448d-0f33-44b9-a68d-5451111935f2" (UID: "e84c448d-0f33-44b9-a68d-5451111935f2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.510256 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-config" (OuterVolumeSpecName: "config") pod "e84c448d-0f33-44b9-a68d-5451111935f2" (UID: "e84c448d-0f33-44b9-a68d-5451111935f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.520324 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e84c448d-0f33-44b9-a68d-5451111935f2" (UID: "e84c448d-0f33-44b9-a68d-5451111935f2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.534208 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e84c448d-0f33-44b9-a68d-5451111935f2" (UID: "e84c448d-0f33-44b9-a68d-5451111935f2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.536006 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e84c448d-0f33-44b9-a68d-5451111935f2" (UID: "e84c448d-0f33-44b9-a68d-5451111935f2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.584011 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.584067 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.584081 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.584090 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.584099 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e84c448d-0f33-44b9-a68d-5451111935f2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.936657 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.936669 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9b86998b5-6ccd6" event={"ID":"e84c448d-0f33-44b9-a68d-5451111935f2","Type":"ContainerDied","Data":"d4f26d2a862fe2de1137af69eb5027dc87fd5aaf282efd55c29e1a4d3638ceaa"} Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.936744 4716 scope.go:117] "RemoveContainer" containerID="94e4d56923bc1f77d9ab2429df23c74d42758061a35a2ababf498a3bb464ef50" Dec 09 15:36:13 crc kubenswrapper[4716]: I1209 15:36:13.948027 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerStarted","Data":"a3180779017091dd283d6463f60890dd7527f3e991b2f5c90d5137befe4db645"} Dec 09 15:36:14 crc kubenswrapper[4716]: I1209 15:36:14.004251 4716 scope.go:117] "RemoveContainer" containerID="8ef1e862b58f4300d19931af74b351b6ad8cacfcde69e7d88ade78c765aaea41" Dec 09 15:36:14 crc kubenswrapper[4716]: I1209 15:36:14.105858 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-6ccd6"] Dec 09 15:36:14 crc kubenswrapper[4716]: I1209 15:36:14.131442 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-9b86998b5-6ccd6"] Dec 09 15:36:14 crc kubenswrapper[4716]: I1209 15:36:14.399717 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:14 crc kubenswrapper[4716]: I1209 15:36:14.424605 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:14 crc kubenswrapper[4716]: I1209 15:36:14.980001 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.147076 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-lnp7x"] 
Dec 09 15:36:15 crc kubenswrapper[4716]: E1209 15:36:15.148445 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" containerName="dnsmasq-dns" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.148466 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" containerName="dnsmasq-dns" Dec 09 15:36:15 crc kubenswrapper[4716]: E1209 15:36:15.148520 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" containerName="init" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.148528 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" containerName="init" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.149060 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" containerName="dnsmasq-dns" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.150194 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.155220 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.155659 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.163574 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-lnp7x"] Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.227997 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e84c448d-0f33-44b9-a68d-5451111935f2" path="/var/lib/kubelet/pods/e84c448d-0f33-44b9-a68d-5451111935f2/volumes" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.250560 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.250955 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfqxr\" (UniqueName: \"kubernetes.io/projected/b24845fa-f1d0-405e-b329-674efd15b3c6-kube-api-access-tfqxr\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.251130 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-scripts\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.251346 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-config-data\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 
15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.353846 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.354225 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfqxr\" (UniqueName: \"kubernetes.io/projected/b24845fa-f1d0-405e-b329-674efd15b3c6-kube-api-access-tfqxr\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.354413 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-scripts\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.354556 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-config-data\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.361227 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-config-data\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.361648 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.377373 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-scripts\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.380714 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfqxr\" (UniqueName: \"kubernetes.io/projected/b24845fa-f1d0-405e-b329-674efd15b3c6-kube-api-access-tfqxr\") pod \"nova-cell1-cell-mapping-lnp7x\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:15 crc kubenswrapper[4716]: I1209 15:36:15.473595 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:16 crc kubenswrapper[4716]: I1209 15:36:16.012032 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-lnp7x"] Dec 09 15:36:16 crc kubenswrapper[4716]: I1209 15:36:16.992556 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-lnp7x" event={"ID":"b24845fa-f1d0-405e-b329-674efd15b3c6","Type":"ContainerStarted","Data":"d7698082c1bf432d676d295fd843194f0662f3667b8231750cf887d93598a62c"} Dec 09 15:36:16 crc kubenswrapper[4716]: I1209 15:36:16.993032 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-lnp7x" event={"ID":"b24845fa-f1d0-405e-b329-674efd15b3c6","Type":"ContainerStarted","Data":"40acc84c29ead567420bea9216da1eaaac3946c7bfeabc75fc764c92bfabcc69"} Dec 09 15:36:17 crc kubenswrapper[4716]: I1209 15:36:17.001083 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerStarted","Data":"bd85c13b06e519867a19e91109220c112981d6fb55edd5f54a8793b135718ca4"} Dec 09 15:36:17 crc kubenswrapper[4716]: I1209 15:36:17.001983 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:36:17 crc kubenswrapper[4716]: I1209 15:36:17.027394 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-lnp7x" podStartSLOduration=2.027363453 podStartE2EDuration="2.027363453s" podCreationTimestamp="2025-12-09 15:36:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:17.010035212 +0000 UTC m=+1664.164779200" watchObservedRunningTime="2025-12-09 15:36:17.027363453 +0000 UTC m=+1664.182107441" Dec 09 15:36:17 crc kubenswrapper[4716]: I1209 15:36:17.071887 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.633658386 podStartE2EDuration="10.071859714s" podCreationTimestamp="2025-12-09 15:36:07 +0000 UTC" firstStartedPulling="2025-12-09 15:36:08.820355725 +0000 UTC m=+1655.975099713" lastFinishedPulling="2025-12-09 15:36:16.258557053 +0000 UTC m=+1663.413301041" observedRunningTime="2025-12-09 15:36:17.039074205 +0000 UTC m=+1664.193818193" watchObservedRunningTime="2025-12-09 15:36:17.071859714 +0000 UTC m=+1664.226603712" Dec 09 15:36:17 crc kubenswrapper[4716]: I1209 15:36:17.922384 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:36:17 crc kubenswrapper[4716]: I1209 15:36:17.922772 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:36:20 crc kubenswrapper[4716]: I1209 15:36:20.260047 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:36:20 crc kubenswrapper[4716]: I1209 15:36:20.261499 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:36:21 crc kubenswrapper[4716]: I1209 15:36:21.277000 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.253:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:36:21 crc kubenswrapper[4716]: I1209 15:36:21.277124 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.253:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:36:22 crc kubenswrapper[4716]: I1209 15:36:22.057768 4716 generic.go:334] "Generic (PLEG): container finished" podID="b24845fa-f1d0-405e-b329-674efd15b3c6" containerID="d7698082c1bf432d676d295fd843194f0662f3667b8231750cf887d93598a62c" exitCode=0 Dec 09 15:36:22 crc kubenswrapper[4716]: I1209 15:36:22.058072 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-lnp7x" event={"ID":"b24845fa-f1d0-405e-b329-674efd15b3c6","Type":"ContainerDied","Data":"d7698082c1bf432d676d295fd843194f0662f3667b8231750cf887d93598a62c"} Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.507536 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.581664 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-scripts\") pod \"b24845fa-f1d0-405e-b329-674efd15b3c6\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.582251 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-config-data\") pod \"b24845fa-f1d0-405e-b329-674efd15b3c6\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.582606 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-combined-ca-bundle\") pod \"b24845fa-f1d0-405e-b329-674efd15b3c6\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.582709 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfqxr\" (UniqueName: \"kubernetes.io/projected/b24845fa-f1d0-405e-b329-674efd15b3c6-kube-api-access-tfqxr\") pod \"b24845fa-f1d0-405e-b329-674efd15b3c6\" (UID: \"b24845fa-f1d0-405e-b329-674efd15b3c6\") " Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.588822 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-scripts" (OuterVolumeSpecName: "scripts") pod "b24845fa-f1d0-405e-b329-674efd15b3c6" (UID: "b24845fa-f1d0-405e-b329-674efd15b3c6"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.589006 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b24845fa-f1d0-405e-b329-674efd15b3c6-kube-api-access-tfqxr" (OuterVolumeSpecName: "kube-api-access-tfqxr") pod "b24845fa-f1d0-405e-b329-674efd15b3c6" (UID: "b24845fa-f1d0-405e-b329-674efd15b3c6"). InnerVolumeSpecName "kube-api-access-tfqxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.624995 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-config-data" (OuterVolumeSpecName: "config-data") pod "b24845fa-f1d0-405e-b329-674efd15b3c6" (UID: "b24845fa-f1d0-405e-b329-674efd15b3c6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.628214 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b24845fa-f1d0-405e-b329-674efd15b3c6" (UID: "b24845fa-f1d0-405e-b329-674efd15b3c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.686833 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.687113 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfqxr\" (UniqueName: \"kubernetes.io/projected/b24845fa-f1d0-405e-b329-674efd15b3c6-kube-api-access-tfqxr\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.687187 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:23 crc kubenswrapper[4716]: I1209 15:36:23.687250 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b24845fa-f1d0-405e-b329-674efd15b3c6-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.089182 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-lnp7x" event={"ID":"b24845fa-f1d0-405e-b329-674efd15b3c6","Type":"ContainerDied","Data":"40acc84c29ead567420bea9216da1eaaac3946c7bfeabc75fc764c92bfabcc69"} Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.089234 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-lnp7x" Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.089240 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40acc84c29ead567420bea9216da1eaaac3946c7bfeabc75fc764c92bfabcc69" Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.274513 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.275836 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-api" containerID="cri-o://980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f" gracePeriod=30 Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.275996 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-log" containerID="cri-o://1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279" gracePeriod=30 Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.351667 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.352033 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-log" containerID="cri-o://759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d" gracePeriod=30 Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.352157 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-metadata" containerID="cri-o://f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de" gracePeriod=30 Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.374707 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:36:24 crc kubenswrapper[4716]: I1209 15:36:24.375009 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="dd08e4af-c44e-421a-9547-39cd45eb46d2" containerName="nova-scheduler-scheduler" containerID="cri-o://89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd" gracePeriod=30 Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.105920 4716 generic.go:334] "Generic (PLEG): container finished" podID="97431237-6850-496d-a176-2e8359c8e0e5" containerID="759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d" exitCode=143 Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.105962 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97431237-6850-496d-a176-2e8359c8e0e5","Type":"ContainerDied","Data":"759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d"} Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.109292 4716 generic.go:334] "Generic (PLEG): container finished" podID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerID="c21055fe884bad569cc527d99cd0f5391a4494ee044f5cba4c1c1d4d34184db4" exitCode=137 Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.109334 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" 
event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerDied","Data":"c21055fe884bad569cc527d99cd0f5391a4494ee044f5cba4c1c1d4d34184db4"} Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.685725 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.834982 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-config-data\") pod \"e61209a4-0a5f-4986-8d3b-e22beea4379a\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.835237 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-scripts\") pod \"e61209a4-0a5f-4986-8d3b-e22beea4379a\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.835379 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-combined-ca-bundle\") pod \"e61209a4-0a5f-4986-8d3b-e22beea4379a\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.835427 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67b9f\" (UniqueName: \"kubernetes.io/projected/e61209a4-0a5f-4986-8d3b-e22beea4379a-kube-api-access-67b9f\") pod \"e61209a4-0a5f-4986-8d3b-e22beea4379a\" (UID: \"e61209a4-0a5f-4986-8d3b-e22beea4379a\") " Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.873961 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e61209a4-0a5f-4986-8d3b-e22beea4379a-kube-api-access-67b9f" (OuterVolumeSpecName: "kube-api-access-67b9f") pod "e61209a4-0a5f-4986-8d3b-e22beea4379a" (UID: "e61209a4-0a5f-4986-8d3b-e22beea4379a"). InnerVolumeSpecName "kube-api-access-67b9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.874376 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-scripts" (OuterVolumeSpecName: "scripts") pod "e61209a4-0a5f-4986-8d3b-e22beea4379a" (UID: "e61209a4-0a5f-4986-8d3b-e22beea4379a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.943312 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:25 crc kubenswrapper[4716]: I1209 15:36:25.943348 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67b9f\" (UniqueName: \"kubernetes.io/projected/e61209a4-0a5f-4986-8d3b-e22beea4379a-kube-api-access-67b9f\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.099704 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e61209a4-0a5f-4986-8d3b-e22beea4379a" (UID: "e61209a4-0a5f-4986-8d3b-e22beea4379a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.128051 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"e61209a4-0a5f-4986-8d3b-e22beea4379a","Type":"ContainerDied","Data":"3cdf61429908b93fb20e0cae30674b36dbe729ff7e0e78889baa940858f97153"} Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.128063 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.128112 4716 scope.go:117] "RemoveContainer" containerID="c21055fe884bad569cc527d99cd0f5391a4494ee044f5cba4c1c1d4d34184db4" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.132822 4716 generic.go:334] "Generic (PLEG): container finished" podID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerID="1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279" exitCode=143 Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.132870 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf841ea1-b016-46df-ba7a-8d5c97748eca","Type":"ContainerDied","Data":"1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279"} Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.138343 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-config-data" (OuterVolumeSpecName: "config-data") pod "e61209a4-0a5f-4986-8d3b-e22beea4379a" (UID: "e61209a4-0a5f-4986-8d3b-e22beea4379a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.160946 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.160981 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e61209a4-0a5f-4986-8d3b-e22beea4379a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.172815 4716 scope.go:117] "RemoveContainer" containerID="bd34a449bc798b2aa98b7707223bad7268b2aa94f25d1735eefd6a5d3b835e4c" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.205085 4716 scope.go:117] "RemoveContainer" containerID="45065179f0a8198e9355a29eb0b671792d64a78846103e61f6d7704eb09939c8" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.234866 4716 scope.go:117] "RemoveContainer" containerID="552515a36bd9262579215f02c970ef4c17108f6264c9a9fb52a174d336b6ebb1" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.528189 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.539013 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.556109 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 09 15:36:26 crc kubenswrapper[4716]: E1209 15:36:26.556691 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-api" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.556718 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-api" Dec 09 
15:36:26 crc kubenswrapper[4716]: E1209 15:36:26.556743 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-notifier" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.556750 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-notifier" Dec 09 15:36:26 crc kubenswrapper[4716]: E1209 15:36:26.556769 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-evaluator" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.556775 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-evaluator" Dec 09 15:36:26 crc kubenswrapper[4716]: E1209 15:36:26.556797 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b24845fa-f1d0-405e-b329-674efd15b3c6" containerName="nova-manage" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.556803 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b24845fa-f1d0-405e-b329-674efd15b3c6" containerName="nova-manage" Dec 09 15:36:26 crc kubenswrapper[4716]: E1209 15:36:26.556832 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-listener" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.556838 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-listener" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.557073 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-listener" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.557091 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-notifier" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.557105 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-evaluator" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.557120 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="b24845fa-f1d0-405e-b329-674efd15b3c6" containerName="nova-manage" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.557131 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" containerName="aodh-api" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.559250 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.567152 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.567259 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.567541 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.567718 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.575637 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.576043 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pmtlh" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.580882 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-internal-tls-certs\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.582854 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-config-data\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.582992 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll25h\" (UniqueName: \"kubernetes.io/projected/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-kube-api-access-ll25h\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.583223 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-scripts\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.583328 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-combined-ca-bundle\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.583422 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-public-tls-certs\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.685501 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-combined-ca-bundle\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" 
Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.685587 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-public-tls-certs\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.685723 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-internal-tls-certs\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.685827 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-config-data\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.685872 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll25h\" (UniqueName: \"kubernetes.io/projected/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-kube-api-access-ll25h\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.685978 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-scripts\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.693024 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-public-tls-certs\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.693055 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-combined-ca-bundle\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.693025 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-internal-tls-certs\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.693388 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-scripts\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.696979 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-config-data\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.706790 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ll25h\" 
(UniqueName: \"kubernetes.io/projected/81faf0e1-4428-4ed2-8d1f-98d0fd11bc40-kube-api-access-ll25h\") pod \"aodh-0\" (UID: \"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40\") " pod="openstack/aodh-0" Dec 09 15:36:26 crc kubenswrapper[4716]: I1209 15:36:26.937050 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 09 15:36:27 crc kubenswrapper[4716]: I1209 15:36:27.228369 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e61209a4-0a5f-4986-8d3b-e22beea4379a" path="/var/lib/kubelet/pods/e61209a4-0a5f-4986-8d3b-e22beea4379a/volumes" Dec 09 15:36:27 crc kubenswrapper[4716]: I1209 15:36:27.462641 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 09 15:36:27 crc kubenswrapper[4716]: W1209 15:36:27.464224 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81faf0e1_4428_4ed2_8d1f_98d0fd11bc40.slice/crio-91019bb8d914cb39b3257fceaec644a642b41f346874106c5466cf7828c3bb05 WatchSource:0}: Error finding container 91019bb8d914cb39b3257fceaec644a642b41f346874106c5466cf7828c3bb05: Status 404 returned error can't find the container with id 91019bb8d914cb39b3257fceaec644a642b41f346874106c5466cf7828c3bb05 Dec 09 15:36:27 crc kubenswrapper[4716]: I1209 15:36:27.500993 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.244:8775/\": read tcp 10.217.0.2:49292->10.217.0.244:8775: read: connection reset by peer" Dec 09 15:36:27 crc kubenswrapper[4716]: I1209 15:36:27.501004 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.244:8775/\": read tcp 10.217.0.2:49290->10.217.0.244:8775: read: connection reset by peer" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.079378 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.122085 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.141109 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-combined-ca-bundle\") pod \"97431237-6850-496d-a176-2e8359c8e0e5\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.141208 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-config-data\") pod \"97431237-6850-496d-a176-2e8359c8e0e5\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.141229 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-nova-metadata-tls-certs\") pod \"97431237-6850-496d-a176-2e8359c8e0e5\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.141471 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97431237-6850-496d-a176-2e8359c8e0e5-logs\") pod \"97431237-6850-496d-a176-2e8359c8e0e5\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.141582 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pr2h\" (UniqueName: \"kubernetes.io/projected/97431237-6850-496d-a176-2e8359c8e0e5-kube-api-access-5pr2h\") pod \"97431237-6850-496d-a176-2e8359c8e0e5\" (UID: \"97431237-6850-496d-a176-2e8359c8e0e5\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.152286 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97431237-6850-496d-a176-2e8359c8e0e5-logs" (OuterVolumeSpecName: "logs") pod "97431237-6850-496d-a176-2e8359c8e0e5" (UID: "97431237-6850-496d-a176-2e8359c8e0e5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.155387 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97431237-6850-496d-a176-2e8359c8e0e5-kube-api-access-5pr2h" (OuterVolumeSpecName: "kube-api-access-5pr2h") pod "97431237-6850-496d-a176-2e8359c8e0e5" (UID: "97431237-6850-496d-a176-2e8359c8e0e5"). InnerVolumeSpecName "kube-api-access-5pr2h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.221573 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40","Type":"ContainerStarted","Data":"91019bb8d914cb39b3257fceaec644a642b41f346874106c5466cf7828c3bb05"} Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.234164 4716 generic.go:334] "Generic (PLEG): container finished" podID="97431237-6850-496d-a176-2e8359c8e0e5" containerID="f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de" exitCode=0 Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.234284 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97431237-6850-496d-a176-2e8359c8e0e5","Type":"ContainerDied","Data":"f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de"} Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.234319 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"97431237-6850-496d-a176-2e8359c8e0e5","Type":"ContainerDied","Data":"a0eddfd77cd259f1be99e56b3887b442c37cbe7905e5a3b187c1dc365f29736b"} Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.234339 4716 scope.go:117] "RemoveContainer" containerID="f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.234566 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.240916 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97431237-6850-496d-a176-2e8359c8e0e5" (UID: "97431237-6850-496d-a176-2e8359c8e0e5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245012 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf841ea1-b016-46df-ba7a-8d5c97748eca-logs\") pod \"bf841ea1-b016-46df-ba7a-8d5c97748eca\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245171 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-public-tls-certs\") pod \"bf841ea1-b016-46df-ba7a-8d5c97748eca\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245210 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-internal-tls-certs\") pod \"bf841ea1-b016-46df-ba7a-8d5c97748eca\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245290 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8whzb\" (UniqueName: \"kubernetes.io/projected/bf841ea1-b016-46df-ba7a-8d5c97748eca-kube-api-access-8whzb\") pod \"bf841ea1-b016-46df-ba7a-8d5c97748eca\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245354 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-config-data\") pod \"bf841ea1-b016-46df-ba7a-8d5c97748eca\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245516 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-combined-ca-bundle\") pod \"bf841ea1-b016-46df-ba7a-8d5c97748eca\" (UID: \"bf841ea1-b016-46df-ba7a-8d5c97748eca\") " Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.245907 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf841ea1-b016-46df-ba7a-8d5c97748eca-logs" (OuterVolumeSpecName: "logs") pod "bf841ea1-b016-46df-ba7a-8d5c97748eca" (UID: "bf841ea1-b016-46df-ba7a-8d5c97748eca"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.250240 4716 generic.go:334] "Generic (PLEG): container finished" podID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerID="980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f" exitCode=0 Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.250522 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf841ea1-b016-46df-ba7a-8d5c97748eca","Type":"ContainerDied","Data":"980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f"} Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.251812 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bf841ea1-b016-46df-ba7a-8d5c97748eca","Type":"ContainerDied","Data":"b0ec7edd1994dfb343d01f1fbc8e012d3aa86ac349a7908aede5ee161fb6e35b"} Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.256987 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf841ea1-b016-46df-ba7a-8d5c97748eca-kube-api-access-8whzb" (OuterVolumeSpecName: "kube-api-access-8whzb") pod "bf841ea1-b016-46df-ba7a-8d5c97748eca" (UID: "bf841ea1-b016-46df-ba7a-8d5c97748eca"). InnerVolumeSpecName "kube-api-access-8whzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.271588 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.293064 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.298085 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97431237-6850-496d-a176-2e8359c8e0e5-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.298144 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pr2h\" (UniqueName: \"kubernetes.io/projected/97431237-6850-496d-a176-2e8359c8e0e5-kube-api-access-5pr2h\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.298169 4716 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf841ea1-b016-46df-ba7a-8d5c97748eca-logs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.304867 4716 scope.go:117] "RemoveContainer" containerID="759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.350158 4716 scope.go:117] "RemoveContainer" containerID="f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.354292 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de\": container with ID starting with f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de not found: ID does not exist" containerID="f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.354370 4716 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de"} err="failed to get container status \"f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de\": rpc error: code = NotFound desc = could not find container \"f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de\": container with ID starting with f01fc44ce14d9f74dd2ca66db0873641746a5d85822aed2a70b094f88cb215de not found: ID does not exist" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.354497 4716 scope.go:117] "RemoveContainer" containerID="759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.358492 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d\": container with ID starting with 759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d not found: ID does not exist" containerID="759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.358555 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d"} err="failed to get container status \"759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d\": rpc error: code = NotFound desc = could not find container \"759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d\": container with ID starting with 759c32ddb8bfcf642b77002cdb58d17ef69487357834a08e9b415fc324c10a9d not found: ID does not exist" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.358594 4716 scope.go:117] "RemoveContainer" containerID="980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.400989 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8whzb\" (UniqueName: \"kubernetes.io/projected/bf841ea1-b016-46df-ba7a-8d5c97748eca-kube-api-access-8whzb\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.422116 4716 scope.go:117] "RemoveContainer" containerID="1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.422128 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf841ea1-b016-46df-ba7a-8d5c97748eca" (UID: "bf841ea1-b016-46df-ba7a-8d5c97748eca"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.522838 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.535226 4716 scope.go:117] "RemoveContainer" containerID="980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.535700 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f\": container with ID starting with 980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f not found: ID does not exist" containerID="980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.535760 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f"} err="failed to get container status \"980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f\": rpc error: code = NotFound desc = could not find container \"980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f\": container with ID starting with 980c60e5f4ec95078821b6ff9ab591041a7b75cedf57a8f4897a6f56fbbd851f not found: ID does not exist" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.535819 4716 scope.go:117] "RemoveContainer" containerID="1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.539537 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "97431237-6850-496d-a176-2e8359c8e0e5" (UID: "97431237-6850-496d-a176-2e8359c8e0e5"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.539791 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-config-data" (OuterVolumeSpecName: "config-data") pod "97431237-6850-496d-a176-2e8359c8e0e5" (UID: "97431237-6850-496d-a176-2e8359c8e0e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.541201 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-config-data" (OuterVolumeSpecName: "config-data") pod "bf841ea1-b016-46df-ba7a-8d5c97748eca" (UID: "bf841ea1-b016-46df-ba7a-8d5c97748eca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.543193 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279\": container with ID starting with 1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279 not found: ID does not exist" containerID="1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.543248 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279"} err="failed to get container status \"1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279\": rpc error: code = NotFound desc = could not find container \"1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279\": container with ID starting with 1599b215a1636d489ec4f1124b01861be276b86f4c020b43bb3a4d4695d84279 not found: ID does not exist" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.580322 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bf841ea1-b016-46df-ba7a-8d5c97748eca" (UID: "bf841ea1-b016-46df-ba7a-8d5c97748eca"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.595010 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "bf841ea1-b016-46df-ba7a-8d5c97748eca" (UID: "bf841ea1-b016-46df-ba7a-8d5c97748eca"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.626750 4716 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.627114 4716 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.627129 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.627142 4716 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/97431237-6850-496d-a176-2e8359c8e0e5-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.627155 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf841ea1-b016-46df-ba7a-8d5c97748eca-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.913299 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.942332 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.988333 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.989013 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-log" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989036 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-log" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.989055 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-log" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989061 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-log" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.989069 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-api" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989075 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-api" Dec 09 15:36:28 crc kubenswrapper[4716]: E1209 15:36:28.989096 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-metadata" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989102 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-metadata" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989381 4716 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-log" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989402 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-log" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989413 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" containerName="nova-api-api" Dec 09 15:36:28 crc kubenswrapper[4716]: I1209 15:36:28.989439 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="97431237-6850-496d-a176-2e8359c8e0e5" containerName="nova-metadata-metadata" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.003198 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.006903 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.015497 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.023065 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.041749 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8zk4\" (UniqueName: \"kubernetes.io/projected/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-kube-api-access-p8zk4\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.041812 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.041862 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.042015 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-config-data\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.042048 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-logs\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.060742 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.080235 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-api-0"] Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.096155 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.099069 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.103176 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.111415 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.111712 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.119530 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.146198 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8zk4\" (UniqueName: \"kubernetes.io/projected/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-kube-api-access-p8zk4\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.146281 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.146349 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.146515 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-config-data\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.146543 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-logs\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.147147 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-logs\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: E1209 15:36:29.164909 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd is running failed: container process not found" containerID="89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 15:36:29 crc kubenswrapper[4716]: E1209 15:36:29.165476 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd is running failed: container process not found" containerID="89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 15:36:29 crc kubenswrapper[4716]: E1209 15:36:29.166363 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd is running failed: container process not found" containerID="89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 09 15:36:29 crc kubenswrapper[4716]: E1209 15:36:29.166500 4716 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="dd08e4af-c44e-421a-9547-39cd45eb46d2" containerName="nova-scheduler-scheduler" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.170378 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.180480 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.198429 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-config-data\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.221355 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8zk4\" (UniqueName: \"kubernetes.io/projected/24ac26ee-f25c-455c-ad9f-54a8991dd6f6-kube-api-access-p8zk4\") pod \"nova-metadata-0\" (UID: \"24ac26ee-f25c-455c-ad9f-54a8991dd6f6\") " pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.232984 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97431237-6850-496d-a176-2e8359c8e0e5" path="/var/lib/kubelet/pods/97431237-6850-496d-a176-2e8359c8e0e5/volumes" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.234759 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf841ea1-b016-46df-ba7a-8d5c97748eca" path="/var/lib/kubelet/pods/bf841ea1-b016-46df-ba7a-8d5c97748eca/volumes" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.251006 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-b2t97\" (UniqueName: \"kubernetes.io/projected/cc126966-2e68-43bc-a129-8f93723b78c1-kube-api-access-b2t97\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.251196 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.251285 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-config-data\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.251309 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.251406 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-public-tls-certs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.251532 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc126966-2e68-43bc-a129-8f93723b78c1-logs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.302981 4716 generic.go:334] "Generic (PLEG): container finished" podID="dd08e4af-c44e-421a-9547-39cd45eb46d2" containerID="89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd" exitCode=0 Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.303284 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dd08e4af-c44e-421a-9547-39cd45eb46d2","Type":"ContainerDied","Data":"89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd"} Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.317196 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40","Type":"ContainerStarted","Data":"5f422accd44e1c6f44b44829afd7459384bfb09dd03ba0c3b3c485033ffb380d"} Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.349937 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.354050 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc126966-2e68-43bc-a129-8f93723b78c1-logs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.354386 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2t97\" (UniqueName: \"kubernetes.io/projected/cc126966-2e68-43bc-a129-8f93723b78c1-kube-api-access-b2t97\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.354527 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.354725 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-config-data\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.354847 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.354960 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-public-tls-certs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.357477 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc126966-2e68-43bc-a129-8f93723b78c1-logs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.358893 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-public-tls-certs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.373371 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.376501 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 
15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.377396 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc126966-2e68-43bc-a129-8f93723b78c1-config-data\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.382879 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2t97\" (UniqueName: \"kubernetes.io/projected/cc126966-2e68-43bc-a129-8f93723b78c1-kube-api-access-b2t97\") pod \"nova-api-0\" (UID: \"cc126966-2e68-43bc-a129-8f93723b78c1\") " pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.469513 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.698333 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.766046 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-config-data\") pod \"dd08e4af-c44e-421a-9547-39cd45eb46d2\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.766281 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-combined-ca-bundle\") pod \"dd08e4af-c44e-421a-9547-39cd45eb46d2\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.766359 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfrlp\" (UniqueName: \"kubernetes.io/projected/dd08e4af-c44e-421a-9547-39cd45eb46d2-kube-api-access-sfrlp\") pod \"dd08e4af-c44e-421a-9547-39cd45eb46d2\" (UID: \"dd08e4af-c44e-421a-9547-39cd45eb46d2\") " Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.782607 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd08e4af-c44e-421a-9547-39cd45eb46d2-kube-api-access-sfrlp" (OuterVolumeSpecName: "kube-api-access-sfrlp") pod "dd08e4af-c44e-421a-9547-39cd45eb46d2" (UID: "dd08e4af-c44e-421a-9547-39cd45eb46d2"). InnerVolumeSpecName "kube-api-access-sfrlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.810757 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-config-data" (OuterVolumeSpecName: "config-data") pod "dd08e4af-c44e-421a-9547-39cd45eb46d2" (UID: "dd08e4af-c44e-421a-9547-39cd45eb46d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.816380 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd08e4af-c44e-421a-9547-39cd45eb46d2" (UID: "dd08e4af-c44e-421a-9547-39cd45eb46d2"). InnerVolumeSpecName "combined-ca-bundle". 
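
The UnmountVolume.TearDown and "Volume detached" records are kubelet's volume reconciler draining the deleted nova-scheduler pod: each pass diffs the desired set of mounts (volumes of pods bound to the node) against the actual set and issues mount or unmount operations until the two agree; DevicePath is empty because secret and projected volumes have no backing block device. A toy version of one reconcile pass, purely illustrative and not kubelet's real data model:

    # Diff desired vs actual mounts and emit the operations that kubelet
    # logs as MountVolume.SetUp / UnmountVolume.TearDown.
    def reconcile(desired: set, actual: set) -> list:
        ops = [f"MountVolume.SetUp {v}" for v in desired - actual]
        ops += [f"UnmountVolume.TearDown {v}" for v in actual - desired]
        return ops

    # The deleted pod leaves the desired state, so its volumes are torn down.
    print(reconcile(desired=set(),
                    actual={"config-data", "combined-ca-bundle",
                            "kube-api-access-sfrlp"}))
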
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.869799 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfrlp\" (UniqueName: \"kubernetes.io/projected/dd08e4af-c44e-421a-9547-39cd45eb46d2-kube-api-access-sfrlp\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.869840 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:29 crc kubenswrapper[4716]: I1209 15:36:29.869853 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd08e4af-c44e-421a-9547-39cd45eb46d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.036198 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.280764 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 09 15:36:30 crc kubenswrapper[4716]: W1209 15:36:30.290345 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc126966_2e68_43bc_a129_8f93723b78c1.slice/crio-1c6cafb763eec49ba51ad31fb6ac654e065b016d7eaf02f7f843aa7e4c5eb6fe WatchSource:0}: Error finding container 1c6cafb763eec49ba51ad31fb6ac654e065b016d7eaf02f7f843aa7e4c5eb6fe: Status 404 returned error can't find the container with id 1c6cafb763eec49ba51ad31fb6ac654e065b016d7eaf02f7f843aa7e4c5eb6fe Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.350279 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24ac26ee-f25c-455c-ad9f-54a8991dd6f6","Type":"ContainerStarted","Data":"ad53e50ace279864e0ff033969f5204ed3e2bbffa0c092d9c3751e95088c5785"} Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.351860 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc126966-2e68-43bc-a129-8f93723b78c1","Type":"ContainerStarted","Data":"1c6cafb763eec49ba51ad31fb6ac654e065b016d7eaf02f7f843aa7e4c5eb6fe"} Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.357051 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"dd08e4af-c44e-421a-9547-39cd45eb46d2","Type":"ContainerDied","Data":"a8a23ec1af70db5df0ebd84d4f1da49d0c415918764476262f3ecf5337106717"} Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.357100 4716 scope.go:117] "RemoveContainer" containerID="89716d2237c6b8bfb35f35f26e47a735e221d64c751f1ab04d3aa30071330cbd" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.357252 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.368871 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40","Type":"ContainerStarted","Data":"ea8c4a952a70f42d609c1f0151a6ff4d486c1e1c52df9b7f42066b9e1ac5d584"} Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.422138 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.439121 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.454595 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:36:30 crc kubenswrapper[4716]: E1209 15:36:30.455342 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd08e4af-c44e-421a-9547-39cd45eb46d2" containerName="nova-scheduler-scheduler" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.455369 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd08e4af-c44e-421a-9547-39cd45eb46d2" containerName="nova-scheduler-scheduler" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.455679 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd08e4af-c44e-421a-9547-39cd45eb46d2" containerName="nova-scheduler-scheduler" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.456592 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.471191 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.480351 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.625106 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nsrr\" (UniqueName: \"kubernetes.io/projected/785bf387-d72e-49ad-8d43-9ef6c1475fb4-kube-api-access-2nsrr\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.625536 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/785bf387-d72e-49ad-8d43-9ef6c1475fb4-config-data\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.625791 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785bf387-d72e-49ad-8d43-9ef6c1475fb4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.736220 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785bf387-d72e-49ad-8d43-9ef6c1475fb4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.736679 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-2nsrr\" (UniqueName: \"kubernetes.io/projected/785bf387-d72e-49ad-8d43-9ef6c1475fb4-kube-api-access-2nsrr\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.736882 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/785bf387-d72e-49ad-8d43-9ef6c1475fb4-config-data\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.740480 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785bf387-d72e-49ad-8d43-9ef6c1475fb4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.749866 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/785bf387-d72e-49ad-8d43-9ef6c1475fb4-config-data\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.754822 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nsrr\" (UniqueName: \"kubernetes.io/projected/785bf387-d72e-49ad-8d43-9ef6c1475fb4-kube-api-access-2nsrr\") pod \"nova-scheduler-0\" (UID: \"785bf387-d72e-49ad-8d43-9ef6c1475fb4\") " pod="openstack/nova-scheduler-0" Dec 09 15:36:30 crc kubenswrapper[4716]: I1209 15:36:30.795361 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.230033 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd08e4af-c44e-421a-9547-39cd45eb46d2" path="/var/lib/kubelet/pods/dd08e4af-c44e-421a-9547-39cd45eb46d2/volumes" Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.308667 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.387457 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"785bf387-d72e-49ad-8d43-9ef6c1475fb4","Type":"ContainerStarted","Data":"79597a91e590cd4290057d1cd923ffe9de41a8793c55c19701e5324c64ddde0d"} Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.395869 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40","Type":"ContainerStarted","Data":"37252f93f72256b003cdd61d6f2e8457105e4e4d289153aa2cb8903633812276"} Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.400133 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24ac26ee-f25c-455c-ad9f-54a8991dd6f6","Type":"ContainerStarted","Data":"0d84a2ee2006c1a0a5d35de92709923923b4c2a21ca084378b5acc13860e3aa3"} Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.400179 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"24ac26ee-f25c-455c-ad9f-54a8991dd6f6","Type":"ContainerStarted","Data":"588ac83b9a04bc20d328a171624ab2154ab3351c94b4aa698fc570c86ab9dab8"} Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.403342 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc126966-2e68-43bc-a129-8f93723b78c1","Type":"ContainerStarted","Data":"07deb39d4980d46f89d13e63e0655ec4c7a2dd5c6066e54d197425a8dd015cdd"} Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.403390 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc126966-2e68-43bc-a129-8f93723b78c1","Type":"ContainerStarted","Data":"27a50cb8af97c525567a60be63261f5c5ca3fd7427740351287c18ce35efb5df"} Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.439795 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.439768335 podStartE2EDuration="3.439768335s" podCreationTimestamp="2025-12-09 15:36:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:31.424704358 +0000 UTC m=+1678.579448346" watchObservedRunningTime="2025-12-09 15:36:31.439768335 +0000 UTC m=+1678.594512323" Dec 09 15:36:31 crc kubenswrapper[4716]: I1209 15:36:31.466163 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.466140582 podStartE2EDuration="3.466140582s" podCreationTimestamp="2025-12-09 15:36:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:31.446167356 +0000 UTC m=+1678.600911374" watchObservedRunningTime="2025-12-09 15:36:31.466140582 +0000 UTC m=+1678.620884570" Dec 09 15:36:32 crc kubenswrapper[4716]: I1209 15:36:32.424008 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"785bf387-d72e-49ad-8d43-9ef6c1475fb4","Type":"ContainerStarted","Data":"4049e00ac7ec0ce81a08d121876fba6557794d6f3b685eeaf503e3ca6b4fbcb7"} Dec 09 15:36:32 crc kubenswrapper[4716]: I1209 15:36:32.431010 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81faf0e1-4428-4ed2-8d1f-98d0fd11bc40","Type":"ContainerStarted","Data":"9ceb29346c62d0162a104db9a33a4d12e55799d3cefad7bec6e965c58de06001"} Dec 09 15:36:32 crc kubenswrapper[4716]: I1209 15:36:32.457413 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.457393632 podStartE2EDuration="2.457393632s" podCreationTimestamp="2025-12-09 15:36:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:36:32.44921581 +0000 UTC m=+1679.603959798" watchObservedRunningTime="2025-12-09 15:36:32.457393632 +0000 UTC m=+1679.612137620" Dec 09 15:36:32 crc kubenswrapper[4716]: I1209 15:36:32.474153 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.381763092 podStartE2EDuration="6.474128746s" podCreationTimestamp="2025-12-09 15:36:26 +0000 UTC" firstStartedPulling="2025-12-09 15:36:27.466935337 +0000 UTC m=+1674.621679325" lastFinishedPulling="2025-12-09 15:36:31.559300991 +0000 UTC m=+1678.714044979" observedRunningTime="2025-12-09 15:36:32.469889076 +0000 UTC m=+1679.624633064" watchObservedRunningTime="2025-12-09 15:36:32.474128746 +0000 UTC m=+1679.628872734" Dec 09 15:36:34 crc kubenswrapper[4716]: I1209 15:36:34.351469 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 15:36:34 crc kubenswrapper[4716]: I1209 15:36:34.352136 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 09 15:36:35 crc kubenswrapper[4716]: I1209 15:36:35.870504 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 09 15:36:38 crc kubenswrapper[4716]: I1209 15:36:38.280231 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 09 15:36:39 crc kubenswrapper[4716]: I1209 15:36:39.352439 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 09 15:36:39 crc kubenswrapper[4716]: I1209 15:36:39.352748 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 09 15:36:39 crc kubenswrapper[4716]: I1209 15:36:39.471546 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:36:39 crc kubenswrapper[4716]: I1209 15:36:39.471603 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 09 15:36:40 crc kubenswrapper[4716]: I1209 15:36:40.695711 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="24ac26ee-f25c-455c-ad9f-54a8991dd6f6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.0:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:36:40 crc kubenswrapper[4716]: I1209 15:36:40.708216 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cc126966-2e68-43bc-a129-8f93723b78c1" containerName="nova-api-log" 
probeResult="failure" output="Get \"https://10.217.1.1:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:36:40 crc kubenswrapper[4716]: I1209 15:36:40.719122 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cc126966-2e68-43bc-a129-8f93723b78c1" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.1:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 09 15:36:40 crc kubenswrapper[4716]: I1209 15:36:40.721333 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="24ac26ee-f25c-455c-ad9f-54a8991dd6f6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.0:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 09 15:36:40 crc kubenswrapper[4716]: I1209 15:36:40.796784 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 09 15:36:40 crc kubenswrapper[4716]: I1209 15:36:40.852018 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 09 15:36:41 crc kubenswrapper[4716]: I1209 15:36:41.234470 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.030248 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.033070 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.033154 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.034386 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.034479 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" gracePeriod=600 Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.312685 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" exitCode=0 Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.312757 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc"} Dec 09 15:36:48 crc kubenswrapper[4716]: I1209 15:36:48.313017 4716 scope.go:117] "RemoveContainer" containerID="86592a964b516fe613abb12daefa0047ee74a39b779e6980fea7c1589b5faf81" Dec 09 15:36:49 crc kubenswrapper[4716]: E1209 15:36:49.279990 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.329444 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:36:49 crc kubenswrapper[4716]: E1209 15:36:49.330154 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.360015 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.376093 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.387114 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.478804 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.479427 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.486462 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 09 15:36:49 crc kubenswrapper[4716]: I1209 15:36:49.488933 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 09 15:36:50 crc kubenswrapper[4716]: I1209 15:36:50.338940 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 09 15:36:50 crc kubenswrapper[4716]: I1209 15:36:50.343424 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 09 15:36:50 crc kubenswrapper[4716]: I1209 15:36:50.345728 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 09 15:37:02 crc kubenswrapper[4716]: I1209 15:37:02.869666 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-xr25h"] Dec 09 15:37:02 crc kubenswrapper[4716]: I1209 15:37:02.881426 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-xr25h"] Dec 09 15:37:02 crc 
kubenswrapper[4716]: I1209 15:37:02.986941 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-msshl"] Dec 09 15:37:02 crc kubenswrapper[4716]: I1209 15:37:02.989484 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.007997 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-msshl"] Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.140961 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239c4119-944c-46e1-9425-285eeb6e0204-combined-ca-bundle\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.141185 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239c4119-944c-46e1-9425-285eeb6e0204-config-data\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.141280 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfm49\" (UniqueName: \"kubernetes.io/projected/239c4119-944c-46e1-9425-285eeb6e0204-kube-api-access-qfm49\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.231590 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1fa7713-f67e-45a2-81c0-73a56280f744" path="/var/lib/kubelet/pods/e1fa7713-f67e-45a2-81c0-73a56280f744/volumes" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.243249 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239c4119-944c-46e1-9425-285eeb6e0204-combined-ca-bundle\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.243323 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239c4119-944c-46e1-9425-285eeb6e0204-config-data\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.243371 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfm49\" (UniqueName: \"kubernetes.io/projected/239c4119-944c-46e1-9425-285eeb6e0204-kube-api-access-qfm49\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.249952 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239c4119-944c-46e1-9425-285eeb6e0204-combined-ca-bundle\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.250951 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/239c4119-944c-46e1-9425-285eeb6e0204-config-data\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.260496 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfm49\" (UniqueName: \"kubernetes.io/projected/239c4119-944c-46e1-9425-285eeb6e0204-kube-api-access-qfm49\") pod \"heat-db-sync-msshl\" (UID: \"239c4119-944c-46e1-9425-285eeb6e0204\") " pod="openstack/heat-db-sync-msshl" Dec 09 15:37:03 crc kubenswrapper[4716]: I1209 15:37:03.324973 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-msshl" Dec 09 15:37:04 crc kubenswrapper[4716]: I1209 15:37:04.212796 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-msshl"] Dec 09 15:37:04 crc kubenswrapper[4716]: I1209 15:37:04.215010 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:37:04 crc kubenswrapper[4716]: E1209 15:37:04.215274 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:37:04 crc kubenswrapper[4716]: E1209 15:37:04.367358 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:37:04 crc kubenswrapper[4716]: E1209 15:37:04.367425 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:37:04 crc kubenswrapper[4716]: E1209 15:37:04.367614 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:37:04 crc kubenswrapper[4716]: E1209 15:37:04.368817 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
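
The ErrImagePull above is the classic failure mode of mutable tags: the current-tested tag was deleted or expired on quay.rdoproject.org, so the manifest lookup fails even though the pod spec is unchanged; pinning the image by digest (image@sha256:...) would remove the dependence on a tag that can move or vanish. Once the pull fails, kubelet moves to ImagePullBackOff (and, for the machine-config-daemon crash earlier, CrashLoopBackOff): retries back off exponentially up to the 5m0s cap that the "back-off 5m0s restarting failed container" message shows. A quick sketch of that schedule, taking the commonly cited 10s initial delay as an assumption:

    # Exponential retry backoff as kubelet applies it to image pulls and
    # crash-looping containers: double per failure, capped at 300s (5m0s).
    def backoff_schedule(failures: int, initial: float = 10.0,
                         cap: float = 300.0) -> list:
        return [min(initial * 2 ** i, cap) for i in range(failures)]

    print(backoff_schedule(7))  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]
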
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:37:04 crc kubenswrapper[4716]: I1209 15:37:04.956342 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-msshl" event={"ID":"239c4119-944c-46e1-9425-285eeb6e0204","Type":"ContainerStarted","Data":"f1d91d0fc51a5b5d7a748f24b2908cc8dde2e3002e78673659b19946182b45e5"} Dec 09 15:37:04 crc kubenswrapper[4716]: E1209 15:37:04.964880 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:37:05 crc kubenswrapper[4716]: I1209 15:37:05.665808 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:37:05 crc kubenswrapper[4716]: I1209 15:37:05.666162 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-central-agent" containerID="cri-o://35256f2291ecc9c70fbd323d34d8e7201c8013357ceeea56685c750b50e6ba33" gracePeriod=30 Dec 09 15:37:05 crc kubenswrapper[4716]: I1209 15:37:05.666268 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="proxy-httpd" containerID="cri-o://bd85c13b06e519867a19e91109220c112981d6fb55edd5f54a8793b135718ca4" gracePeriod=30 Dec 09 15:37:05 crc kubenswrapper[4716]: I1209 15:37:05.666268 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="sg-core" containerID="cri-o://a3180779017091dd283d6463f60890dd7527f3e991b2f5c90d5137befe4db645" gracePeriod=30 Dec 09 15:37:05 crc kubenswrapper[4716]: I1209 15:37:05.666364 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-notification-agent" containerID="cri-o://10cbf7908a1b4272450e9e0370647ee92ba9c3d4f24bfafa1a7f1d89b2d0b2dc" gracePeriod=30 Dec 09 15:37:06 crc kubenswrapper[4716]: I1209 15:37:06.001928 4716 generic.go:334] "Generic (PLEG): container finished" podID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerID="bd85c13b06e519867a19e91109220c112981d6fb55edd5f54a8793b135718ca4" exitCode=0 Dec 09 15:37:06 crc kubenswrapper[4716]: I1209 15:37:06.002274 4716 generic.go:334] "Generic (PLEG): container finished" podID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerID="a3180779017091dd283d6463f60890dd7527f3e991b2f5c90d5137befe4db645" exitCode=2 Dec 09 15:37:06 crc kubenswrapper[4716]: I1209 15:37:06.011152 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerDied","Data":"bd85c13b06e519867a19e91109220c112981d6fb55edd5f54a8793b135718ca4"} Dec 09 15:37:06 crc kubenswrapper[4716]: I1209 15:37:06.011206 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerDied","Data":"a3180779017091dd283d6463f60890dd7527f3e991b2f5c90d5137befe4db645"} Dec 09 15:37:06 crc kubenswrapper[4716]: E1209 15:37:06.018128 4716 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:37:06 crc kubenswrapper[4716]: I1209 15:37:06.034227 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:37:06 crc kubenswrapper[4716]: I1209 15:37:06.745347 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.033781 4716 generic.go:334] "Generic (PLEG): container finished" podID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerID="10cbf7908a1b4272450e9e0370647ee92ba9c3d4f24bfafa1a7f1d89b2d0b2dc" exitCode=0 Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.034105 4716 generic.go:334] "Generic (PLEG): container finished" podID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerID="35256f2291ecc9c70fbd323d34d8e7201c8013357ceeea56685c750b50e6ba33" exitCode=0 Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.034149 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerDied","Data":"10cbf7908a1b4272450e9e0370647ee92ba9c3d4f24bfafa1a7f1d89b2d0b2dc"} Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.034196 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerDied","Data":"35256f2291ecc9c70fbd323d34d8e7201c8013357ceeea56685c750b50e6ba33"} Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.373013 4716 util.go:48] "No ready sandbox for pod can be found. 
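
The "Killing container with a grace period" entries followed by ContainerDied are ordinary graceful termination: kubelet sends SIGTERM, waits up to gracePeriod=30 seconds, and only then would escalate to SIGKILL; the ceilometer agents exited inside the window (exit code 0, and 2 for sg-core). A process cooperates with this protocol by handling SIGTERM, for example:

    import signal
    import sys
    import time

    # Minimal graceful-shutdown pattern: turn SIGTERM into an orderly exit
    # so the container dies with code 0 within the grace period instead of
    # being SIGKILLed when it expires.
    def on_term(signum, frame):
        # flush buffers, close connections, release locks, then exit
        sys.exit(0)

    signal.signal(signal.SIGTERM, on_term)

    while True:  # stand-in for the agent's main loop
        time.sleep(1)
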
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526172 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-log-httpd\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526267 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-ceilometer-tls-certs\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526449 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-config-data\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526555 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-run-httpd\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526612 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-scripts\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526680 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq29d\" (UniqueName: \"kubernetes.io/projected/890bfa10-578a-4a64-8829-b2898dd4e02a-kube-api-access-kq29d\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.526708 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-combined-ca-bundle\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.527863 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.527997 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.528042 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-sg-core-conf-yaml\") pod \"890bfa10-578a-4a64-8829-b2898dd4e02a\" (UID: \"890bfa10-578a-4a64-8829-b2898dd4e02a\") " Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.532174 4716 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.532239 4716 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/890bfa10-578a-4a64-8829-b2898dd4e02a-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.533822 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-scripts" (OuterVolumeSpecName: "scripts") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.539285 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/890bfa10-578a-4a64-8829-b2898dd4e02a-kube-api-access-kq29d" (OuterVolumeSpecName: "kube-api-access-kq29d") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "kube-api-access-kq29d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.570607 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.635425 4716 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-scripts\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.635473 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq29d\" (UniqueName: \"kubernetes.io/projected/890bfa10-578a-4a64-8829-b2898dd4e02a-kube-api-access-kq29d\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.635497 4716 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.662040 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.670702 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.722745 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-config-data" (OuterVolumeSpecName: "config-data") pod "890bfa10-578a-4a64-8829-b2898dd4e02a" (UID: "890bfa10-578a-4a64-8829-b2898dd4e02a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.738447 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.738503 4716 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:07 crc kubenswrapper[4716]: I1209 15:37:07.738516 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/890bfa10-578a-4a64-8829-b2898dd4e02a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.056771 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"890bfa10-578a-4a64-8829-b2898dd4e02a","Type":"ContainerDied","Data":"8587d3978013827c7d9ad87e662c364055e613accf32ddd4466a156dc9757d3d"} Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.056876 4716 scope.go:117] "RemoveContainer" containerID="bd85c13b06e519867a19e91109220c112981d6fb55edd5f54a8793b135718ca4" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.057194 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.090234 4716 scope.go:117] "RemoveContainer" containerID="a3180779017091dd283d6463f60890dd7527f3e991b2f5c90d5137befe4db645" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.132100 4716 scope.go:117] "RemoveContainer" containerID="10cbf7908a1b4272450e9e0370647ee92ba9c3d4f24bfafa1a7f1d89b2d0b2dc" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.139008 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.158675 4716 scope.go:117] "RemoveContainer" containerID="35256f2291ecc9c70fbd323d34d8e7201c8013357ceeea56685c750b50e6ba33" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.159920 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.177659 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:37:08 crc kubenswrapper[4716]: E1209 15:37:08.178245 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-central-agent" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178272 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-central-agent" Dec 09 15:37:08 crc kubenswrapper[4716]: E1209 15:37:08.178293 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-notification-agent" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178299 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-notification-agent" Dec 09 15:37:08 crc kubenswrapper[4716]: E1209 15:37:08.178312 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="sg-core" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178320 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="sg-core" Dec 09 15:37:08 crc kubenswrapper[4716]: E1209 15:37:08.178354 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="proxy-httpd" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178360 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="proxy-httpd" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178610 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-notification-agent" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178660 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="ceilometer-central-agent" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178673 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="sg-core" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.178684 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" containerName="proxy-httpd" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.181256 4716 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.189206 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.189206 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.196099 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.200676 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.256326 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz724\" (UniqueName: \"kubernetes.io/projected/29cebb2d-8cdb-49de-a29d-1d02808e46a9-kube-api-access-lz724\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.256757 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-scripts\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.256951 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.257028 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.257386 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-config-data\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.257552 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29cebb2d-8cdb-49de-a29d-1d02808e46a9-log-httpd\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.257614 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.257766 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/29cebb2d-8cdb-49de-a29d-1d02808e46a9-run-httpd\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359560 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-scripts\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359689 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359719 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359828 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-config-data\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359864 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29cebb2d-8cdb-49de-a29d-1d02808e46a9-log-httpd\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359882 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359922 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29cebb2d-8cdb-49de-a29d-1d02808e46a9-run-httpd\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.359964 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz724\" (UniqueName: \"kubernetes.io/projected/29cebb2d-8cdb-49de-a29d-1d02808e46a9-kube-api-access-lz724\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.360902 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29cebb2d-8cdb-49de-a29d-1d02808e46a9-log-httpd\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.361122 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/29cebb2d-8cdb-49de-a29d-1d02808e46a9-run-httpd\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.365822 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.375363 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-config-data\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.376391 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.377774 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-scripts\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.379335 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz724\" (UniqueName: \"kubernetes.io/projected/29cebb2d-8cdb-49de-a29d-1d02808e46a9-kube-api-access-lz724\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.394251 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29cebb2d-8cdb-49de-a29d-1d02808e46a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"29cebb2d-8cdb-49de-a29d-1d02808e46a9\") " pod="openstack/ceilometer-0" Dec 09 15:37:08 crc kubenswrapper[4716]: I1209 15:37:08.503039 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 09 15:37:09 crc kubenswrapper[4716]: I1209 15:37:09.072024 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 09 15:37:09 crc kubenswrapper[4716]: E1209 15:37:09.217162 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:37:09 crc kubenswrapper[4716]: E1209 15:37:09.217519 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:37:09 crc kubenswrapper[4716]: E1209 15:37:09.217789 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 09 15:37:09 crc kubenswrapper[4716]: I1209 15:37:09.233343 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="890bfa10-578a-4a64-8829-b2898dd4e02a" path="/var/lib/kubelet/pods/890bfa10-578a-4a64-8829-b2898dd4e02a/volumes" Dec 09 15:37:10 crc kubenswrapper[4716]: I1209 15:37:10.086567 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29cebb2d-8cdb-49de-a29d-1d02808e46a9","Type":"ContainerStarted","Data":"110c9699fb6c25eb3b13ad54c2f0721978f3690911cfb105e63aae7b5318f89a"} Dec 09 15:37:10 crc kubenswrapper[4716]: I1209 15:37:10.086972 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29cebb2d-8cdb-49de-a29d-1d02808e46a9","Type":"ContainerStarted","Data":"a450744e439be03f3f2442ba3b548046bab8dae2e878bd5224c1c9634d10c4cb"} Dec 09 15:37:11 crc kubenswrapper[4716]: I1209 15:37:11.101763 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29cebb2d-8cdb-49de-a29d-1d02808e46a9","Type":"ContainerStarted","Data":"301939cec838f2885b712e039b31004d17b87f6c67ec2a7654e43bd4c1ea6c62"} Dec 09 15:37:11 crc kubenswrapper[4716]: I1209 15:37:11.276047 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="rabbitmq" containerID="cri-o://f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f" gracePeriod=604796 Dec 09 15:37:11 crc kubenswrapper[4716]: I1209 15:37:11.816040 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" containerName="rabbitmq" containerID="cri-o://7696f50534aecaa2dbb9aaa696207604da35af9cd5ff4df8ef5c7fbb7c79ce7d" gracePeriod=604795 Dec 09 15:37:12 crc kubenswrapper[4716]: E1209 15:37:12.496885 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:13 crc kubenswrapper[4716]: I1209 15:37:13.127381 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29cebb2d-8cdb-49de-a29d-1d02808e46a9","Type":"ContainerStarted","Data":"323fc844869adf134a6dd8ba27e1121c5222a6374008e33a420ca42a55cd09e5"} Dec 09 15:37:13 crc kubenswrapper[4716]: I1209 15:37:13.127579 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 09 15:37:13 crc kubenswrapper[4716]: E1209 15:37:13.129744 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:13 crc kubenswrapper[4716]: I1209 15:37:13.339245 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.130:5671: connect: connection refused" Dec 09 15:37:14 crc kubenswrapper[4716]: E1209 15:37:14.140492 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:17 crc kubenswrapper[4716]: I1209 15:37:17.217714 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:37:17 crc kubenswrapper[4716]: E1209 15:37:17.218592 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:37:17 crc kubenswrapper[4716]: I1209 15:37:17.943945 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123218 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f417726f-0022-42f5-bfe8-79f6605d557c-erlang-cookie-secret\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123367 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-confd\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123490 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-server-conf\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123568 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-plugins-conf\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123611 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f417726f-0022-42f5-bfe8-79f6605d557c-pod-info\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123659 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-plugins\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123682 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123751 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-config-data\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123775 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7p9l2\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-kube-api-access-7p9l2\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123837 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-tls\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: 
\"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.123967 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-erlang-cookie\") pod \"f417726f-0022-42f5-bfe8-79f6605d557c\" (UID: \"f417726f-0022-42f5-bfe8-79f6605d557c\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.124590 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.124850 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.125561 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.131955 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.132421 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f417726f-0022-42f5-bfe8-79f6605d557c-pod-info" (OuterVolumeSpecName: "pod-info") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.132461 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f417726f-0022-42f5-bfe8-79f6605d557c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.132536 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-kube-api-access-7p9l2" (OuterVolumeSpecName: "kube-api-access-7p9l2") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "kube-api-access-7p9l2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.133989 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.164335 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-config-data" (OuterVolumeSpecName: "config-data") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.197093 4716 generic.go:334] "Generic (PLEG): container finished" podID="2e140762-44f7-46f9-9bbe-a8f780186869" containerID="7696f50534aecaa2dbb9aaa696207604da35af9cd5ff4df8ef5c7fbb7c79ce7d" exitCode=0 Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.197204 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2e140762-44f7-46f9-9bbe-a8f780186869","Type":"ContainerDied","Data":"7696f50534aecaa2dbb9aaa696207604da35af9cd5ff4df8ef5c7fbb7c79ce7d"} Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.199465 4716 generic.go:334] "Generic (PLEG): container finished" podID="f417726f-0022-42f5-bfe8-79f6605d557c" containerID="f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f" exitCode=0 Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.199495 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f417726f-0022-42f5-bfe8-79f6605d557c","Type":"ContainerDied","Data":"f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f"} Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.199515 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f417726f-0022-42f5-bfe8-79f6605d557c","Type":"ContainerDied","Data":"109d2ea91abfd32719018d9e4affafb6c5882e449d187fb5f6b8096e056779e5"} Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.199533 4716 scope.go:117] "RemoveContainer" containerID="f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.199701 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.223356 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-server-conf" (OuterVolumeSpecName: "server-conf") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227077 4716 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-server-conf\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227212 4716 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227235 4716 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f417726f-0022-42f5-bfe8-79f6605d557c-pod-info\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227253 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227290 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227303 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f417726f-0022-42f5-bfe8-79f6605d557c-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227318 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7p9l2\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-kube-api-access-7p9l2\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227332 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227346 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.227358 4716 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f417726f-0022-42f5-bfe8-79f6605d557c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.246704 4716 scope.go:117] "RemoveContainer" containerID="ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.275655 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.277376 4716 scope.go:117] "RemoveContainer" containerID="f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.277928 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f\": container with ID starting with f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f not found: ID does not exist" containerID="f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.277995 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f"} err="failed to get container status \"f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f\": rpc error: code = NotFound desc = could not find container \"f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f\": container with ID starting with f9f96b7789a17dba8336bea793242c1bb90dd8c961187d7e058079dc8f34f68f not found: ID does not exist" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.278026 4716 scope.go:117] "RemoveContainer" containerID="ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.278343 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c\": container with ID starting with ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c not found: ID does not exist" containerID="ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.278371 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c"} err="failed to get container status \"ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c\": rpc error: code = NotFound desc = could not find container \"ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c\": container with ID starting with ac28d70edbf9692f0120f0aceb7ea5384f675a7413c2f464665d3e608f33765c not found: ID does not exist" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.331122 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.344334 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.344400 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.344595 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.346307 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.348384 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f417726f-0022-42f5-bfe8-79f6605d557c" (UID: "f417726f-0022-42f5-bfe8-79f6605d557c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.434160 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f417726f-0022-42f5-bfe8-79f6605d557c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.470336 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.624478 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.639677 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25bbc\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-kube-api-access-25bbc\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.639770 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2e140762-44f7-46f9-9bbe-a8f780186869-erlang-cookie-secret\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.639819 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.639851 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2e140762-44f7-46f9-9bbe-a8f780186869-pod-info\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640023 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-plugins-conf\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640042 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-plugins\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640094 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-confd\") pod 
\"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640204 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-erlang-cookie\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640245 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-server-conf\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640261 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-tls\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.640291 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-config-data\") pod \"2e140762-44f7-46f9-9bbe-a8f780186869\" (UID: \"2e140762-44f7-46f9-9bbe-a8f780186869\") " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.641174 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.645446 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.649583 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.653953 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.655232 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-kube-api-access-25bbc" (OuterVolumeSpecName: "kube-api-access-25bbc") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "kube-api-access-25bbc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.661461 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.663065 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e140762-44f7-46f9-9bbe-a8f780186869-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.663851 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.677227 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.679827 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/2e140762-44f7-46f9-9bbe-a8f780186869-pod-info" (OuterVolumeSpecName: "pod-info") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.682069 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="setup-container" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.682110 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="setup-container" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.682133 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" containerName="setup-container" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.682143 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" containerName="setup-container" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.682157 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" containerName="rabbitmq" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.682167 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" containerName="rabbitmq" Dec 09 15:37:18 crc kubenswrapper[4716]: E1209 15:37:18.682199 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="rabbitmq" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.682207 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="rabbitmq" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.682613 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" containerName="rabbitmq" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.682653 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" containerName="rabbitmq" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.684501 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.684578 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.692344 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.692596 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.693853 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.694295 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.710834 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-b9frm" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.711159 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.711328 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746385 4716 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746429 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746446 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746457 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746468 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25bbc\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-kube-api-access-25bbc\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746481 4716 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2e140762-44f7-46f9-9bbe-a8f780186869-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746521 4716 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.746534 4716 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2e140762-44f7-46f9-9bbe-a8f780186869-pod-info\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.767193 4716 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-config-data" (OuterVolumeSpecName: "config-data") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.810431 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-server-conf" (OuterVolumeSpecName: "server-conf") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.834058 4716 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.848636 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.848678 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3fcad49-046e-4075-8906-c4629fc77587-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.848722 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.848766 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.848793 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.848842 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849109 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3fcad49-046e-4075-8906-c4629fc77587-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849135 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849220 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849374 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849412 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvs9p\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-kube-api-access-xvs9p\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849531 4716 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849551 4716 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-server-conf\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.849576 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2e140762-44f7-46f9-9bbe-a8f780186869-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.879832 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "2e140762-44f7-46f9-9bbe-a8f780186869" (UID: "2e140762-44f7-46f9-9bbe-a8f780186869"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.951464 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.951614 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.951902 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.951926 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3fcad49-046e-4075-8906-c4629fc77587-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.951951 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952073 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952205 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952242 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvs9p\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-kube-api-access-xvs9p\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952311 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952335 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3fcad49-046e-4075-8906-c4629fc77587-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952378 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952451 4716 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2e140762-44f7-46f9-9bbe-a8f780186869-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952492 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.952543 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.953169 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.953521 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.953726 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.954054 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3fcad49-046e-4075-8906-c4629fc77587-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.958939 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3fcad49-046e-4075-8906-c4629fc77587-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.960561 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.962180 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.962246 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3fcad49-046e-4075-8906-c4629fc77587-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:18 crc kubenswrapper[4716]: I1209 15:37:18.976095 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvs9p\" (UniqueName: \"kubernetes.io/projected/b3fcad49-046e-4075-8906-c4629fc77587-kube-api-access-xvs9p\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.003885 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3fcad49-046e-4075-8906-c4629fc77587\") " pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.215032 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.238380 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f417726f-0022-42f5-bfe8-79f6605d557c" path="/var/lib/kubelet/pods/f417726f-0022-42f5-bfe8-79f6605d557c/volumes" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.239842 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2e140762-44f7-46f9-9bbe-a8f780186869","Type":"ContainerDied","Data":"011f85a5a25660be9efc49e788d83e16413bbfe1c4dc8846aef954335394c58e"} Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.239914 4716 scope.go:117] "RemoveContainer" containerID="7696f50534aecaa2dbb9aaa696207604da35af9cd5ff4df8ef5c7fbb7c79ce7d" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.257787 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.281726 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.295702 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.296797 4716 scope.go:117] "RemoveContainer" containerID="30392210ad09dadc4c995e968b32b6d395c64e8fffbf6808673e45e67027cea0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.333713 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.338382 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.345866 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.371352 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.371761 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.371983 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.372160 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.372558 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-t42b4" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.374137 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.396178 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.595443 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fcc8c279-3c20-4050-95d5-5b71af2134cf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.602983 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603026 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603246 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603311 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6xqx\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-kube-api-access-x6xqx\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603429 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603653 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-config-data\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603718 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fcc8c279-3c20-4050-95d5-5b71af2134cf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603747 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603902 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.603926 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705412 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705647 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: 
\"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705708 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705734 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6xqx\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-kube-api-access-x6xqx\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705782 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705823 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-config-data\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705854 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fcc8c279-3c20-4050-95d5-5b71af2134cf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705875 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705924 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.705940 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.706006 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fcc8c279-3c20-4050-95d5-5b71af2134cf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.706258 4716 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.707129 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.707975 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.708298 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.709720 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.710246 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fcc8c279-3c20-4050-95d5-5b71af2134cf-config-data\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.715356 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.728952 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fcc8c279-3c20-4050-95d5-5b71af2134cf-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.729059 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fcc8c279-3c20-4050-95d5-5b71af2134cf-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.729317 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.742481 4716 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6xqx\" (UniqueName: \"kubernetes.io/projected/fcc8c279-3c20-4050-95d5-5b71af2134cf-kube-api-access-x6xqx\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:19 crc kubenswrapper[4716]: I1209 15:37:19.779661 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-server-0\" (UID: \"fcc8c279-3c20-4050-95d5-5b71af2134cf\") " pod="openstack/rabbitmq-server-0" Dec 09 15:37:20 crc kubenswrapper[4716]: I1209 15:37:20.004291 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 09 15:37:20 crc kubenswrapper[4716]: I1209 15:37:20.205581 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 09 15:37:20 crc kubenswrapper[4716]: I1209 15:37:20.242369 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3fcad49-046e-4075-8906-c4629fc77587","Type":"ContainerStarted","Data":"2b42dcdd08fb297ca3acc4d21fb41e43a0735faa1faf5e37addda6ff9bc0540b"} Dec 09 15:37:20 crc kubenswrapper[4716]: I1209 15:37:20.523609 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.226927 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e140762-44f7-46f9-9bbe-a8f780186869" path="/var/lib/kubelet/pods/2e140762-44f7-46f9-9bbe-a8f780186869/volumes" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.269231 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcc8c279-3c20-4050-95d5-5b71af2134cf","Type":"ContainerStarted","Data":"8f5bddc184110c69cf32fe39a88a99875f1ce46df8f439a3fe7399bcaf73c3d0"} Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.375678 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-xhg5h"] Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.377847 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.386588 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.399354 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-xhg5h"] Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.463805 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.463888 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-config\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.464091 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.464125 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.464148 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.464396 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.464444 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x95vw\" (UniqueName: \"kubernetes.io/projected/f0007b83-d58b-4cbf-90d2-53001aaf53f2-kube-api-access-x95vw\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.566571 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-openstack-edpm-ipam\") pod 
\"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.566666 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x95vw\" (UniqueName: \"kubernetes.io/projected/f0007b83-d58b-4cbf-90d2-53001aaf53f2-kube-api-access-x95vw\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.566731 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.566798 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-config\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.567004 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.567033 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.567059 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.567758 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-sb\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.567957 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-config\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.567953 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-nb\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " 
pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.568216 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-openstack-edpm-ipam\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.568238 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-svc\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.568314 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-swift-storage-0\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.599409 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x95vw\" (UniqueName: \"kubernetes.io/projected/f0007b83-d58b-4cbf-90d2-53001aaf53f2-kube-api-access-x95vw\") pod \"dnsmasq-dns-7d84b4d45c-xhg5h\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:21 crc kubenswrapper[4716]: I1209 15:37:21.714825 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:22 crc kubenswrapper[4716]: I1209 15:37:22.370795 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-xhg5h"] Dec 09 15:37:22 crc kubenswrapper[4716]: W1209 15:37:22.378738 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0007b83_d58b_4cbf_90d2_53001aaf53f2.slice/crio-f65af4f5f4e9a095f150f168d0a08c5e6a8b1b70690412ad9dbc86d741f72fce WatchSource:0}: Error finding container f65af4f5f4e9a095f150f168d0a08c5e6a8b1b70690412ad9dbc86d741f72fce: Status 404 returned error can't find the container with id f65af4f5f4e9a095f150f168d0a08c5e6a8b1b70690412ad9dbc86d741f72fce Dec 09 15:37:23 crc kubenswrapper[4716]: I1209 15:37:23.314478 4716 generic.go:334] "Generic (PLEG): container finished" podID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerID="4454aaa3cb932e8173abd1edde6d6d3bbe217154f3de17ae4a1f6fe2ce775536" exitCode=0 Dec 09 15:37:23 crc kubenswrapper[4716]: I1209 15:37:23.314588 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" event={"ID":"f0007b83-d58b-4cbf-90d2-53001aaf53f2","Type":"ContainerDied","Data":"4454aaa3cb932e8173abd1edde6d6d3bbe217154f3de17ae4a1f6fe2ce775536"} Dec 09 15:37:23 crc kubenswrapper[4716]: I1209 15:37:23.314868 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" event={"ID":"f0007b83-d58b-4cbf-90d2-53001aaf53f2","Type":"ContainerStarted","Data":"f65af4f5f4e9a095f150f168d0a08c5e6a8b1b70690412ad9dbc86d741f72fce"} Dec 09 15:37:23 crc kubenswrapper[4716]: I1209 15:37:23.319275 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"fcc8c279-3c20-4050-95d5-5b71af2134cf","Type":"ContainerStarted","Data":"01d7c4b2915026ba0e9c44f80419410debb36b833589a769b660296a8b439395"} Dec 09 15:37:23 crc kubenswrapper[4716]: I1209 15:37:23.347473 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3fcad49-046e-4075-8906-c4629fc77587","Type":"ContainerStarted","Data":"36235f6deff81c28af644a37c960acbeed94f79ca153565b0d4a25732282be4a"} Dec 09 15:37:24 crc kubenswrapper[4716]: I1209 15:37:24.362363 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" event={"ID":"f0007b83-d58b-4cbf-90d2-53001aaf53f2","Type":"ContainerStarted","Data":"a7a89d6c5a212c5d3b83df66ed54f22e8551e606c4ba7bf3be413d52e554a338"} Dec 09 15:37:24 crc kubenswrapper[4716]: I1209 15:37:24.388261 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" podStartSLOduration=3.3882402 podStartE2EDuration="3.3882402s" podCreationTimestamp="2025-12-09 15:37:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:37:24.381104558 +0000 UTC m=+1731.535848546" watchObservedRunningTime="2025-12-09 15:37:24.3882402 +0000 UTC m=+1731.542984188" Dec 09 15:37:25 crc kubenswrapper[4716]: I1209 15:37:25.375544 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:26 crc kubenswrapper[4716]: I1209 15:37:26.224323 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 09 15:37:26 crc kubenswrapper[4716]: E1209 15:37:26.350370 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:37:26 crc kubenswrapper[4716]: E1209 15:37:26.350501 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:37:26 crc kubenswrapper[4716]: E1209 15:37:26.350745 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:37:26 crc kubenswrapper[4716]: E1209 15:37:26.352109 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:26 crc kubenswrapper[4716]: E1209 15:37:26.387861 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:28 crc kubenswrapper[4716]: I1209 15:37:28.214008 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:37:28 crc kubenswrapper[4716]: E1209 15:37:28.215482 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:37:31 crc kubenswrapper[4716]: I1209 15:37:31.716936 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:31 crc kubenswrapper[4716]: I1209 15:37:31.784063 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-tngkz"] Dec 09 15:37:31 crc kubenswrapper[4716]: I1209 15:37:31.784391 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="dnsmasq-dns" containerID="cri-o://b1ddc6c27910cc1411c865b8b2c0e8b0f224d5df32b9ab97972b290f8f266983" gracePeriod=10 Dec 09 15:37:31 crc kubenswrapper[4716]: I1209 15:37:31.998152 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-r72fn"] Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.001002 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.012981 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-r72fn"] Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.131970 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.132047 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.132175 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.132270 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkx4t\" (UniqueName: \"kubernetes.io/projected/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-kube-api-access-mkx4t\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.132325 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.132402 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-config\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.132456 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.235066 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.235196 4716 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkx4t\" (UniqueName: \"kubernetes.io/projected/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-kube-api-access-mkx4t\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.235253 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.235321 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-config\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.235376 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.236183 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.236520 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.236696 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-config\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.236770 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-openstack-edpm-ipam\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.237039 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.237205 4716 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.238037 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.238055 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-dns-svc\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.256805 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkx4t\" (UniqueName: \"kubernetes.io/projected/6cd1bcab-3e4a-41f7-97a2-19818a1f3415-kube-api-access-mkx4t\") pod \"dnsmasq-dns-6f6df4f56c-r72fn\" (UID: \"6cd1bcab-3e4a-41f7-97a2-19818a1f3415\") " pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.321461 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.469861 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerID="b1ddc6c27910cc1411c865b8b2c0e8b0f224d5df32b9ab97972b290f8f266983" exitCode=0 Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.469909 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" event={"ID":"ef6af67b-56b5-45e7-8811-3ccf3ea02613","Type":"ContainerDied","Data":"b1ddc6c27910cc1411c865b8b2c0e8b0f224d5df32b9ab97972b290f8f266983"} Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.469941 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" event={"ID":"ef6af67b-56b5-45e7-8811-3ccf3ea02613","Type":"ContainerDied","Data":"23c909c88ef8a7b046c289c1e23b40462e32194c02a73dc15baf5613d7083301"} Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.469951 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23c909c88ef8a7b046c289c1e23b40462e32194c02a73dc15baf5613d7083301" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.489763 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.656840 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-swift-storage-0\") pod \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.656965 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p7hk\" (UniqueName: \"kubernetes.io/projected/ef6af67b-56b5-45e7-8811-3ccf3ea02613-kube-api-access-4p7hk\") pod \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.657148 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-nb\") pod \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.657238 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-svc\") pod \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.657283 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-config\") pod \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.657317 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-sb\") pod \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\" (UID: \"ef6af67b-56b5-45e7-8811-3ccf3ea02613\") " Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.674188 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef6af67b-56b5-45e7-8811-3ccf3ea02613-kube-api-access-4p7hk" (OuterVolumeSpecName: "kube-api-access-4p7hk") pod "ef6af67b-56b5-45e7-8811-3ccf3ea02613" (UID: "ef6af67b-56b5-45e7-8811-3ccf3ea02613"). InnerVolumeSpecName "kube-api-access-4p7hk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.734756 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ef6af67b-56b5-45e7-8811-3ccf3ea02613" (UID: "ef6af67b-56b5-45e7-8811-3ccf3ea02613"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.738609 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ef6af67b-56b5-45e7-8811-3ccf3ea02613" (UID: "ef6af67b-56b5-45e7-8811-3ccf3ea02613"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.740942 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-config" (OuterVolumeSpecName: "config") pod "ef6af67b-56b5-45e7-8811-3ccf3ea02613" (UID: "ef6af67b-56b5-45e7-8811-3ccf3ea02613"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.756147 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ef6af67b-56b5-45e7-8811-3ccf3ea02613" (UID: "ef6af67b-56b5-45e7-8811-3ccf3ea02613"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.761251 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.761295 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p7hk\" (UniqueName: \"kubernetes.io/projected/ef6af67b-56b5-45e7-8811-3ccf3ea02613-kube-api-access-4p7hk\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.761308 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.761352 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.761365 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.764030 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef6af67b-56b5-45e7-8811-3ccf3ea02613" (UID: "ef6af67b-56b5-45e7-8811-3ccf3ea02613"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.848858 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6df4f56c-r72fn"] Dec 09 15:37:32 crc kubenswrapper[4716]: I1209 15:37:32.863757 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef6af67b-56b5-45e7-8811-3ccf3ea02613-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:33 crc kubenswrapper[4716]: E1209 15:37:33.226893 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:37:33 crc kubenswrapper[4716]: I1209 15:37:33.484722 4716 generic.go:334] "Generic (PLEG): container finished" podID="6cd1bcab-3e4a-41f7-97a2-19818a1f3415" containerID="ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe" exitCode=0 Dec 09 15:37:33 crc kubenswrapper[4716]: I1209 15:37:33.484853 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" Dec 09 15:37:33 crc kubenswrapper[4716]: I1209 15:37:33.485196 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" event={"ID":"6cd1bcab-3e4a-41f7-97a2-19818a1f3415","Type":"ContainerDied","Data":"ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe"} Dec 09 15:37:33 crc kubenswrapper[4716]: I1209 15:37:33.485334 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" event={"ID":"6cd1bcab-3e4a-41f7-97a2-19818a1f3415","Type":"ContainerStarted","Data":"2b64b9a8f2bc1d5c494b083b9adb68c0e02da52f5ad0b74aceab9e4efb73f352"} Dec 09 15:37:33 crc kubenswrapper[4716]: I1209 15:37:33.573972 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-tngkz"] Dec 09 15:37:33 crc kubenswrapper[4716]: I1209 15:37:33.593124 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7bbf7cf9-tngkz"] Dec 09 15:37:34 crc kubenswrapper[4716]: I1209 15:37:34.501775 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" event={"ID":"6cd1bcab-3e4a-41f7-97a2-19818a1f3415","Type":"ContainerStarted","Data":"7389a1713261c7e4ba7dc0e7ad5927e4d678b29be6013f704c4c42e75890b4cf"} Dec 09 15:37:34 crc kubenswrapper[4716]: I1209 15:37:34.502100 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:34 crc kubenswrapper[4716]: I1209 15:37:34.522760 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" podStartSLOduration=3.52273396 podStartE2EDuration="3.52273396s" podCreationTimestamp="2025-12-09 15:37:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:37:34.517692517 +0000 UTC m=+1741.672436505" watchObservedRunningTime="2025-12-09 15:37:34.52273396 +0000 UTC m=+1741.677477938" Dec 09 15:37:35 crc kubenswrapper[4716]: E1209 15:37:35.191922 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:35 crc kubenswrapper[4716]: I1209 15:37:35.227925 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" path="/var/lib/kubelet/pods/ef6af67b-56b5-45e7-8811-3ccf3ea02613/volumes" Dec 09 15:37:37 crc kubenswrapper[4716]: I1209 15:37:37.433855 4716 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6b7bbf7cf9-tngkz" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.250:5353: i/o timeout" Dec 09 15:37:40 crc kubenswrapper[4716]: I1209 15:37:40.214416 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:37:40 crc kubenswrapper[4716]: E1209 15:37:40.215432 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:37:41 crc kubenswrapper[4716]: E1209 15:37:41.216013 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:42 crc kubenswrapper[4716]: I1209 15:37:42.323911 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f6df4f56c-r72fn" Dec 09 15:37:42 crc kubenswrapper[4716]: I1209 15:37:42.390872 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-xhg5h"] Dec 09 15:37:42 crc kubenswrapper[4716]: I1209 15:37:42.391176 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerName="dnsmasq-dns" containerID="cri-o://a7a89d6c5a212c5d3b83df66ed54f22e8551e606c4ba7bf3be413d52e554a338" gracePeriod=10 Dec 09 15:37:42 crc kubenswrapper[4716]: I1209 15:37:42.606380 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" event={"ID":"f0007b83-d58b-4cbf-90d2-53001aaf53f2","Type":"ContainerDied","Data":"a7a89d6c5a212c5d3b83df66ed54f22e8551e606c4ba7bf3be413d52e554a338"} Dec 09 15:37:42 crc kubenswrapper[4716]: I1209 15:37:42.606382 4716 generic.go:334] "Generic (PLEG): container finished" podID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerID="a7a89d6c5a212c5d3b83df66ed54f22e8551e606c4ba7bf3be413d52e554a338" exitCode=0 Dec 09 15:37:42 crc kubenswrapper[4716]: I1209 15:37:42.946563 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.027808 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-nb\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.027937 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-config\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.027975 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-swift-storage-0\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.028071 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-sb\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.028183 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-openstack-edpm-ipam\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.028224 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x95vw\" (UniqueName: \"kubernetes.io/projected/f0007b83-d58b-4cbf-90d2-53001aaf53f2-kube-api-access-x95vw\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.028274 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-svc\") pod \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\" (UID: \"f0007b83-d58b-4cbf-90d2-53001aaf53f2\") " Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.033651 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0007b83-d58b-4cbf-90d2-53001aaf53f2-kube-api-access-x95vw" (OuterVolumeSpecName: "kube-api-access-x95vw") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "kube-api-access-x95vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.090173 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.095232 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.099800 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-config" (OuterVolumeSpecName: "config") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.110178 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.118663 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132245 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f0007b83-d58b-4cbf-90d2-53001aaf53f2" (UID: "f0007b83-d58b-4cbf-90d2-53001aaf53f2"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132531 4716 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-config\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132577 4716 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132594 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132608 4716 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132640 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x95vw\" (UniqueName: \"kubernetes.io/projected/f0007b83-d58b-4cbf-90d2-53001aaf53f2-kube-api-access-x95vw\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132653 4716 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.132664 4716 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0007b83-d58b-4cbf-90d2-53001aaf53f2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 09 15:37:43 crc kubenswrapper[4716]: E1209 15:37:43.458361 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0007b83_d58b_4cbf_90d2_53001aaf53f2.slice/crio-f65af4f5f4e9a095f150f168d0a08c5e6a8b1b70690412ad9dbc86d741f72fce\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.620371 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" event={"ID":"f0007b83-d58b-4cbf-90d2-53001aaf53f2","Type":"ContainerDied","Data":"f65af4f5f4e9a095f150f168d0a08c5e6a8b1b70690412ad9dbc86d741f72fce"} Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.620436 4716 scope.go:117] "RemoveContainer" containerID="a7a89d6c5a212c5d3b83df66ed54f22e8551e606c4ba7bf3be413d52e554a338" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.620443 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7d84b4d45c-xhg5h" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.656410 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-xhg5h"] Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.669056 4716 scope.go:117] "RemoveContainer" containerID="4454aaa3cb932e8173abd1edde6d6d3bbe217154f3de17ae4a1f6fe2ce775536" Dec 09 15:37:43 crc kubenswrapper[4716]: I1209 15:37:43.676718 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7d84b4d45c-xhg5h"] Dec 09 15:37:45 crc kubenswrapper[4716]: I1209 15:37:45.226636 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" path="/var/lib/kubelet/pods/f0007b83-d58b-4cbf-90d2-53001aaf53f2/volumes" Dec 09 15:37:45 crc kubenswrapper[4716]: E1209 15:37:45.245014 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:47 crc kubenswrapper[4716]: E1209 15:37:47.347668 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:37:47 crc kubenswrapper[4716]: E1209 15:37:47.347991 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:37:47 crc kubenswrapper[4716]: E1209 15:37:47.348137 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:37:47 crc kubenswrapper[4716]: E1209 15:37:47.349362 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:37:48 crc kubenswrapper[4716]: E1209 15:37:48.105270 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:48 crc kubenswrapper[4716]: E1209 15:37:48.105297 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:51 crc kubenswrapper[4716]: I1209 15:37:51.213706 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:37:51 crc kubenswrapper[4716]: E1209 15:37:51.214544 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:37:54 crc kubenswrapper[4716]: I1209 15:37:54.745543 4716 generic.go:334] "Generic (PLEG): container finished" podID="fcc8c279-3c20-4050-95d5-5b71af2134cf" containerID="01d7c4b2915026ba0e9c44f80419410debb36b833589a769b660296a8b439395" exitCode=0 Dec 09 15:37:54 crc kubenswrapper[4716]: I1209 15:37:54.745612 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcc8c279-3c20-4050-95d5-5b71af2134cf","Type":"ContainerDied","Data":"01d7c4b2915026ba0e9c44f80419410debb36b833589a769b660296a8b439395"} Dec 09 15:37:54 crc kubenswrapper[4716]: I1209 15:37:54.749564 4716 generic.go:334] "Generic (PLEG): container finished" podID="b3fcad49-046e-4075-8906-c4629fc77587" containerID="36235f6deff81c28af644a37c960acbeed94f79ca153565b0d4a25732282be4a" exitCode=0 Dec 09 15:37:54 crc kubenswrapper[4716]: I1209 15:37:54.749850 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3fcad49-046e-4075-8906-c4629fc77587","Type":"ContainerDied","Data":"36235f6deff81c28af644a37c960acbeed94f79ca153565b0d4a25732282be4a"} Dec 09 15:37:55 crc kubenswrapper[4716]: E1209 15:37:55.624427 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:55 crc kubenswrapper[4716]: I1209 15:37:55.763608 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fcc8c279-3c20-4050-95d5-5b71af2134cf","Type":"ContainerStarted","Data":"30e6d5bd04a3d80618689f48e6fa2b4226e3b8502b8fd22e954f0ef921d7912e"} Dec 09 15:37:55 crc kubenswrapper[4716]: I1209 15:37:55.763875 4716 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 09 15:37:55 crc kubenswrapper[4716]: I1209 15:37:55.765723 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3fcad49-046e-4075-8906-c4629fc77587","Type":"ContainerStarted","Data":"cb13df549a9b04ba0f927308b392a199738e4caae09f6ef384315f5e12b5d140"} Dec 09 15:37:55 crc kubenswrapper[4716]: I1209 15:37:55.765937 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:37:55 crc kubenswrapper[4716]: I1209 15:37:55.790412 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.79037373 podStartE2EDuration="36.79037373s" podCreationTimestamp="2025-12-09 15:37:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:37:55.786579002 +0000 UTC m=+1762.941322990" watchObservedRunningTime="2025-12-09 15:37:55.79037373 +0000 UTC m=+1762.945117718" Dec 09 15:37:55 crc kubenswrapper[4716]: I1209 15:37:55.833766 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.833740299 podStartE2EDuration="37.833740299s" podCreationTimestamp="2025-12-09 15:37:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 15:37:55.820139063 +0000 UTC m=+1762.974883061" watchObservedRunningTime="2025-12-09 15:37:55.833740299 +0000 UTC m=+1762.988484287" Dec 09 15:37:56 crc kubenswrapper[4716]: E1209 15:37:56.337496 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:37:56 crc kubenswrapper[4716]: E1209 15:37:56.337982 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:37:56 crc kubenswrapper[4716]: E1209 15:37:56.338147 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:37:56 crc kubenswrapper[4716]: E1209 15:37:56.339279 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.108428 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm"] Dec 09 15:37:57 crc kubenswrapper[4716]: E1209 15:37:57.109285 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerName="dnsmasq-dns" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.109298 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerName="dnsmasq-dns" Dec 09 15:37:57 crc kubenswrapper[4716]: E1209 15:37:57.109323 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerName="init" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.109329 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerName="init" Dec 09 15:37:57 crc kubenswrapper[4716]: E1209 15:37:57.109368 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="init" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.109375 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="init" Dec 09 15:37:57 crc kubenswrapper[4716]: E1209 15:37:57.109386 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="dnsmasq-dns" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.109393 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="dnsmasq-dns" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.109616 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0007b83-d58b-4cbf-90d2-53001aaf53f2" containerName="dnsmasq-dns" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.109649 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef6af67b-56b5-45e7-8811-3ccf3ea02613" containerName="dnsmasq-dns" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.110485 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.112882 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.113024 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.113027 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.113301 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.135925 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm"] Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.286331 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.286412 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.286497 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.286803 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27mcb\" (UniqueName: \"kubernetes.io/projected/06d46d95-221a-40fb-a82e-0143ea9a6c91-kube-api-access-27mcb\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.388957 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.389470 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-inventory\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.389736 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27mcb\" (UniqueName: \"kubernetes.io/projected/06d46d95-221a-40fb-a82e-0143ea9a6c91-kube-api-access-27mcb\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.392307 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.396034 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.396161 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.401078 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.410000 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27mcb\" (UniqueName: \"kubernetes.io/projected/06d46d95-221a-40fb-a82e-0143ea9a6c91-kube-api-access-27mcb\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:57 crc kubenswrapper[4716]: I1209 15:37:57.431500 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:37:58 crc kubenswrapper[4716]: I1209 15:37:58.120947 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm"] Dec 09 15:37:58 crc kubenswrapper[4716]: E1209 15:37:58.173103 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:37:58 crc kubenswrapper[4716]: I1209 15:37:58.816764 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" event={"ID":"06d46d95-221a-40fb-a82e-0143ea9a6c91","Type":"ContainerStarted","Data":"86ceee7266b48243625018a0a8bc646471cace1e0d1eb00ef7d6f423ea8c120b"} Dec 09 15:38:00 crc kubenswrapper[4716]: E1209 15:38:00.215808 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:38:02 crc kubenswrapper[4716]: I1209 15:38:02.214516 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:38:02 crc kubenswrapper[4716]: E1209 15:38:02.218527 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:38:05 crc kubenswrapper[4716]: E1209 15:38:05.736402 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:38:07 crc kubenswrapper[4716]: I1209 15:38:07.468923 4716 scope.go:117] "RemoveContainer" containerID="0460309a5095d240c5808831d399e9fd710461684fa8878a37d5bf1fb7faaf34" Dec 09 15:38:09 crc kubenswrapper[4716]: I1209 15:38:09.264791 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 09 15:38:10 crc kubenswrapper[4716]: I1209 15:38:10.007895 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 09 15:38:10 crc kubenswrapper[4716]: I1209 15:38:10.238764 4716 scope.go:117] "RemoveContainer" containerID="89f8649d8de36b3186a92b11c3e5e0c4155a8670ed25422646bdcfed91d890f0" Dec 09 15:38:10 crc kubenswrapper[4716]: E1209 15:38:10.238815 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:38:10 crc kubenswrapper[4716]: I1209 15:38:10.354454 4716 scope.go:117] "RemoveContainer" containerID="8383510e1ab5a6016d5b78a2724e66be30548fb6416cf327c09930c84aa90b98" Dec 09 15:38:10 crc kubenswrapper[4716]: I1209 15:38:10.995777 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" event={"ID":"06d46d95-221a-40fb-a82e-0143ea9a6c91","Type":"ContainerStarted","Data":"410a0f003db302a6bfe7a1d6163cd5db84c715678a0bb5342804551928300361"} Dec 09 15:38:11 crc kubenswrapper[4716]: I1209 15:38:11.028389 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" podStartSLOduration=1.7100536800000001 podStartE2EDuration="14.02835652s" podCreationTimestamp="2025-12-09 15:37:57 +0000 UTC" firstStartedPulling="2025-12-09 15:37:58.122055238 +0000 UTC m=+1765.276799226" lastFinishedPulling="2025-12-09 15:38:10.440358078 +0000 UTC m=+1777.595102066" observedRunningTime="2025-12-09 15:38:11.015169466 +0000 UTC m=+1778.169913464" watchObservedRunningTime="2025-12-09 15:38:11.02835652 +0000 UTC m=+1778.183100508" Dec 09 15:38:13 crc kubenswrapper[4716]: E1209 15:38:13.445939 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:38:14 crc kubenswrapper[4716]: I1209 15:38:14.214870 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:38:14 crc kubenswrapper[4716]: E1209 15:38:14.215589 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:38:14 crc kubenswrapper[4716]: E1209 15:38:14.216819 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:38:15 crc kubenswrapper[4716]: E1209 15:38:15.797752 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]" Dec 09 15:38:22 crc kubenswrapper[4716]: E1209 15:38:22.219718 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:38:23 crc kubenswrapper[4716]: I1209 15:38:23.164990 4716 generic.go:334] "Generic (PLEG): container finished" podID="06d46d95-221a-40fb-a82e-0143ea9a6c91" containerID="410a0f003db302a6bfe7a1d6163cd5db84c715678a0bb5342804551928300361" exitCode=0 Dec 09 15:38:23 crc kubenswrapper[4716]: I1209 15:38:23.165106 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" event={"ID":"06d46d95-221a-40fb-a82e-0143ea9a6c91","Type":"ContainerDied","Data":"410a0f003db302a6bfe7a1d6163cd5db84c715678a0bb5342804551928300361"} Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.751550 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.877017 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27mcb\" (UniqueName: \"kubernetes.io/projected/06d46d95-221a-40fb-a82e-0143ea9a6c91-kube-api-access-27mcb\") pod \"06d46d95-221a-40fb-a82e-0143ea9a6c91\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.877301 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-ssh-key\") pod \"06d46d95-221a-40fb-a82e-0143ea9a6c91\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.877506 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-inventory\") pod \"06d46d95-221a-40fb-a82e-0143ea9a6c91\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.877547 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-repo-setup-combined-ca-bundle\") pod \"06d46d95-221a-40fb-a82e-0143ea9a6c91\" (UID: \"06d46d95-221a-40fb-a82e-0143ea9a6c91\") " Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.883841 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06d46d95-221a-40fb-a82e-0143ea9a6c91-kube-api-access-27mcb" (OuterVolumeSpecName: "kube-api-access-27mcb") pod "06d46d95-221a-40fb-a82e-0143ea9a6c91" (UID: "06d46d95-221a-40fb-a82e-0143ea9a6c91"). InnerVolumeSpecName "kube-api-access-27mcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.884330 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "06d46d95-221a-40fb-a82e-0143ea9a6c91" (UID: "06d46d95-221a-40fb-a82e-0143ea9a6c91"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.913257 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-inventory" (OuterVolumeSpecName: "inventory") pod "06d46d95-221a-40fb-a82e-0143ea9a6c91" (UID: "06d46d95-221a-40fb-a82e-0143ea9a6c91"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.919544 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "06d46d95-221a-40fb-a82e-0143ea9a6c91" (UID: "06d46d95-221a-40fb-a82e-0143ea9a6c91"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.982144 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27mcb\" (UniqueName: \"kubernetes.io/projected/06d46d95-221a-40fb-a82e-0143ea9a6c91-kube-api-access-27mcb\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.982183 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.982192 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:24 crc kubenswrapper[4716]: I1209 15:38:24.982201 4716 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d46d95-221a-40fb-a82e-0143ea9a6c91-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.196427 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" event={"ID":"06d46d95-221a-40fb-a82e-0143ea9a6c91","Type":"ContainerDied","Data":"86ceee7266b48243625018a0a8bc646471cace1e0d1eb00ef7d6f423ea8c120b"} Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.196495 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86ceee7266b48243625018a0a8bc646471cace1e0d1eb00ef7d6f423ea8c120b" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.196499 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.215366 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:38:25 crc kubenswrapper[4716]: E1209 15:38:25.215717 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.294825 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9"] Dec 09 15:38:25 crc kubenswrapper[4716]: E1209 15:38:25.295608 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06d46d95-221a-40fb-a82e-0143ea9a6c91" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.295677 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="06d46d95-221a-40fb-a82e-0143ea9a6c91" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.295958 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="06d46d95-221a-40fb-a82e-0143ea9a6c91" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.296909 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.304236 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.304434 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.304486 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.304499 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.322396 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9"] Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.393128 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.393224 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.393302 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk9gd\" (UniqueName: \"kubernetes.io/projected/ee7f866e-a97e-410e-abf9-b67082ac802f-kube-api-access-dk9gd\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.495780 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.495895 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.495953 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk9gd\" (UniqueName: \"kubernetes.io/projected/ee7f866e-a97e-410e-abf9-b67082ac802f-kube-api-access-dk9gd\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.515581 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.515581 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.522946 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk9gd\" (UniqueName: \"kubernetes.io/projected/ee7f866e-a97e-410e-abf9-b67082ac802f-kube-api-access-dk9gd\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-rpdr9\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:25 crc kubenswrapper[4716]: I1209 15:38:25.617698 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:38:26 crc kubenswrapper[4716]: E1209 15:38:26.155606 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 15:38:26 crc kubenswrapper[4716]: I1209 15:38:26.263443 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9"]
Dec 09 15:38:27 crc kubenswrapper[4716]: E1209 15:38:27.215885 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:38:27 crc kubenswrapper[4716]: I1209 15:38:27.239177 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" event={"ID":"ee7f866e-a97e-410e-abf9-b67082ac802f","Type":"ContainerStarted","Data":"9da59f4c12bc9245a0314965ed8045daae4b5507c696f355ef7ee4e092dc8b04"}
Dec 09 15:38:28 crc kubenswrapper[4716]: E1209 15:38:28.173227 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd1bcab_3e4a_41f7_97a2_19818a1f3415.slice/crio-ef71d2686049fc78464c531dcec1285871eacf1f5143951e46466e6bb269bcbe.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 15:38:28 crc kubenswrapper[4716]: I1209 15:38:28.255459 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" event={"ID":"ee7f866e-a97e-410e-abf9-b67082ac802f","Type":"ContainerStarted","Data":"90ef3d442e287a6e02bc15401b02db5ade7c76122eb1ac0a47574c312b514b9e"}
Dec 09 15:38:28 crc kubenswrapper[4716]: I1209 15:38:28.289153 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" podStartSLOduration=2.550028144 podStartE2EDuration="3.289124325s" podCreationTimestamp="2025-12-09 15:38:25 +0000 UTC" firstStartedPulling="2025-12-09 15:38:26.271191137 +0000 UTC m=+1793.425935125" lastFinishedPulling="2025-12-09 15:38:27.010287318 +0000 UTC m=+1794.165031306" observedRunningTime="2025-12-09 15:38:28.273028869 +0000 UTC m=+1795.427772867" watchObservedRunningTime="2025-12-09 15:38:28.289124325 +0000 UTC m=+1795.443868313"
Dec 09 15:38:31 crc kubenswrapper[4716]: I1209 15:38:31.301726 4716 generic.go:334] "Generic (PLEG): container finished" podID="ee7f866e-a97e-410e-abf9-b67082ac802f" containerID="90ef3d442e287a6e02bc15401b02db5ade7c76122eb1ac0a47574c312b514b9e" exitCode=0
Dec 09 15:38:31 crc kubenswrapper[4716]: I1209 15:38:31.301825 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" event={"ID":"ee7f866e-a97e-410e-abf9-b67082ac802f","Type":"ContainerDied","Data":"90ef3d442e287a6e02bc15401b02db5ade7c76122eb1ac0a47574c312b514b9e"}
Dec 09 15:38:32 crc kubenswrapper[4716]: I1209 15:38:32.884733 4716 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:32 crc kubenswrapper[4716]: I1209 15:38:32.987272 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dk9gd\" (UniqueName: \"kubernetes.io/projected/ee7f866e-a97e-410e-abf9-b67082ac802f-kube-api-access-dk9gd\") pod \"ee7f866e-a97e-410e-abf9-b67082ac802f\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " Dec 09 15:38:32 crc kubenswrapper[4716]: I1209 15:38:32.987512 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-ssh-key\") pod \"ee7f866e-a97e-410e-abf9-b67082ac802f\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " Dec 09 15:38:32 crc kubenswrapper[4716]: I1209 15:38:32.987667 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-inventory\") pod \"ee7f866e-a97e-410e-abf9-b67082ac802f\" (UID: \"ee7f866e-a97e-410e-abf9-b67082ac802f\") " Dec 09 15:38:32 crc kubenswrapper[4716]: I1209 15:38:32.993515 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee7f866e-a97e-410e-abf9-b67082ac802f-kube-api-access-dk9gd" (OuterVolumeSpecName: "kube-api-access-dk9gd") pod "ee7f866e-a97e-410e-abf9-b67082ac802f" (UID: "ee7f866e-a97e-410e-abf9-b67082ac802f"). InnerVolumeSpecName "kube-api-access-dk9gd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.022000 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ee7f866e-a97e-410e-abf9-b67082ac802f" (UID: "ee7f866e-a97e-410e-abf9-b67082ac802f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.023339 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-inventory" (OuterVolumeSpecName: "inventory") pod "ee7f866e-a97e-410e-abf9-b67082ac802f" (UID: "ee7f866e-a97e-410e-abf9-b67082ac802f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.090788 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.090826 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee7f866e-a97e-410e-abf9-b67082ac802f-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.090841 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dk9gd\" (UniqueName: \"kubernetes.io/projected/ee7f866e-a97e-410e-abf9-b67082ac802f-kube-api-access-dk9gd\") on node \"crc\" DevicePath \"\"" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.327468 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" event={"ID":"ee7f866e-a97e-410e-abf9-b67082ac802f","Type":"ContainerDied","Data":"9da59f4c12bc9245a0314965ed8045daae4b5507c696f355ef7ee4e092dc8b04"} Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.327824 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9da59f4c12bc9245a0314965ed8045daae4b5507c696f355ef7ee4e092dc8b04" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.327748 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-rpdr9" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.429514 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv"] Dec 09 15:38:33 crc kubenswrapper[4716]: E1209 15:38:33.430474 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7f866e-a97e-410e-abf9-b67082ac802f" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.430603 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7f866e-a97e-410e-abf9-b67082ac802f" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.431151 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee7f866e-a97e-410e-abf9-b67082ac802f" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.432400 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.442637 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.442918 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.443071 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.443199 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.449880 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv"] Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.505449 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th257\" (UniqueName: \"kubernetes.io/projected/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-kube-api-access-th257\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.505947 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.506354 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.506534 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.609000 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.609095 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-bootstrap-combined-ca-bundle\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.609147 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th257\" (UniqueName: \"kubernetes.io/projected/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-kube-api-access-th257\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.609187 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.615601 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.615887 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.616024 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.634346 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th257\" (UniqueName: \"kubernetes.io/projected/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-kube-api-access-th257\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:38:33 crc kubenswrapper[4716]: I1209 15:38:33.769332 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv"
Dec 09 15:38:34 crc kubenswrapper[4716]: E1209 15:38:34.215982 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:38:34 crc kubenswrapper[4716]: W1209 15:38:34.356118 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a03690d_fbfb_4a36_9b27_ca857fcc88f1.slice/crio-a39000f89ba1b6c2d7151d9f5bdb83a7dcf5570c8e5066df9855cd9cea5e1a97 WatchSource:0}: Error finding container a39000f89ba1b6c2d7151d9f5bdb83a7dcf5570c8e5066df9855cd9cea5e1a97: Status 404 returned error can't find the container with id a39000f89ba1b6c2d7151d9f5bdb83a7dcf5570c8e5066df9855cd9cea5e1a97
Dec 09 15:38:34 crc kubenswrapper[4716]: I1209 15:38:34.363473 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv"]
Dec 09 15:38:35 crc kubenswrapper[4716]: I1209 15:38:35.364943 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" event={"ID":"7a03690d-fbfb-4a36-9b27-ca857fcc88f1","Type":"ContainerStarted","Data":"30acac6410b37ffa2afc9776eb9387a1fe379d293eb163f03ed2379d8fa8eb21"}
Dec 09 15:38:35 crc kubenswrapper[4716]: I1209 15:38:35.366531 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" event={"ID":"7a03690d-fbfb-4a36-9b27-ca857fcc88f1","Type":"ContainerStarted","Data":"a39000f89ba1b6c2d7151d9f5bdb83a7dcf5570c8e5066df9855cd9cea5e1a97"}
Dec 09 15:38:35 crc kubenswrapper[4716]: I1209 15:38:35.384935 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" podStartSLOduration=1.9553363620000002 podStartE2EDuration="2.384911304s" podCreationTimestamp="2025-12-09 15:38:33 +0000 UTC" firstStartedPulling="2025-12-09 15:38:34.362121633 +0000 UTC m=+1801.516865621" lastFinishedPulling="2025-12-09 15:38:34.791696575 +0000 UTC m=+1801.946440563" observedRunningTime="2025-12-09 15:38:35.384573204 +0000 UTC m=+1802.539317192" watchObservedRunningTime="2025-12-09 15:38:35.384911304 +0000 UTC m=+1802.539655302"
Dec 09 15:38:39 crc kubenswrapper[4716]: I1209 15:38:39.214641 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc"
Dec 09 15:38:39 crc kubenswrapper[4716]: E1209 15:38:39.215599 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 15:38:42 crc kubenswrapper[4716]: E1209 15:38:42.349044 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 15:38:42 crc kubenswrapper[4716]: E1209 15:38:42.349590 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 15:38:42 crc kubenswrapper[4716]: E1209 15:38:42.350007 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
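The pull failure above originates at the registry: resolving the tag's manifest returns "Tag current-tested was deleted or has expired". The same lookup can be reproduced outside the kubelet with a plain OCI distribution API request (GET /v2/<name>/manifests/<reference>). The Go sketch below is not part of this log; it assumes anonymous access is permitted for this repository (the registry may instead answer 401 and require a token), and it uses the exact image reference quoted in the error.

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	// Manifest endpoint for the reference from the PullImage error above.
    	url := "https://quay.rdoproject.org/v2/podified-master-centos10/openstack-heat-engine/manifests/current-tested"
    	req, err := http.NewRequest(http.MethodGet, url, nil)
    	if err != nil {
    		panic(err)
    	}
    	// Ask for an OCI image manifest; registries negotiate on this header.
    	req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json")
    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	// A deleted/expired tag is expected to yield 404; 401 means auth is required.
    	fmt.Println(resp.Status)
    }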
Dec 09 15:38:42 crc kubenswrapper[4716]: E1209 15:38:42.351301 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:38:47 crc kubenswrapper[4716]: E1209 15:38:47.332737 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 15:38:47 crc kubenswrapper[4716]: E1209 15:38:47.333389 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 15:38:47 crc kubenswrapper[4716]: E1209 15:38:47.333583 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 15:38:47 crc kubenswrapper[4716]: E1209 15:38:47.334798 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:38:53 crc kubenswrapper[4716]: E1209 15:38:53.230757 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:38:54 crc kubenswrapper[4716]: I1209 15:38:54.214007 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc"
Dec 09 15:38:54 crc kubenswrapper[4716]: E1209 15:38:54.214723 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 15:38:58 crc kubenswrapper[4716]: E1209 15:38:58.216209 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:39:04 crc kubenswrapper[4716]: E1209 15:39:04.216825 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
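After the initial ErrImagePull, the kubelet does not retry immediately: the ImagePullBackOff entries that keep recurring above are emitted while it waits out an exponentially growing delay, capped at the 5m0s quoted in the CrashLoopBackOff messages. A minimal Go sketch of that doubling schedule follows; the 10s base and factor of 2 are assumptions consistent with upstream kubelet defaults, not values read from this log.

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const (
    		base     = 10 * time.Second // assumed initial back-off
    		maxDelay = 5 * time.Minute  // cap quoted as "back-off 5m0s" in the log
    	)
    	delay := base
    	for failure := 1; failure <= 8; failure++ {
    		fmt.Printf("failure %d: next retry in %s\n", failure, delay)
    		delay *= 2 // double after each consecutive failure...
    		if delay > maxDelay {
    			delay = maxDelay // ...but never exceed the cap
    		}
    	}
    }

Running this prints 10s, 20s, 40s, 1m20s, 2m40s, then 5m0s repeatedly, which matches the roughly ten-second and later multi-minute gaps between the "Back-off pulling image" entries for heat-db-sync-msshl and ceilometer-0.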
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:39:08 crc kubenswrapper[4716]: I1209 15:39:08.213820 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:39:08 crc kubenswrapper[4716]: E1209 15:39:08.217479 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.549253 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4z8zx"] Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.552491 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.562685 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4z8zx"] Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.678432 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-utilities\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.678973 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/382d6c66-2bf2-4240-99ca-b71b255a07c3-kube-api-access-5xtxs\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.679067 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-catalog-content\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.736339 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.739427 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.785966 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.789139 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/382d6c66-2bf2-4240-99ca-b71b255a07c3-kube-api-access-5xtxs\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.789362 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-catalog-content\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.789917 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-utilities\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.790919 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-catalog-content\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.791644 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-utilities\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.809508 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/382d6c66-2bf2-4240-99ca-b71b255a07c3-kube-api-access-5xtxs\") pod \"community-operators-4z8zx\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.880451 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.892479 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-utilities\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.892539 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvndw\" (UniqueName: \"kubernetes.io/projected/2fda88cc-680a-4a33-a9ea-547cf12e50d9-kube-api-access-pvndw\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.893069 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-catalog-content\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.998204 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-utilities\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.998283 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvndw\" (UniqueName: \"kubernetes.io/projected/2fda88cc-680a-4a33-a9ea-547cf12e50d9-kube-api-access-pvndw\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.998396 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-catalog-content\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.998927 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-utilities\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:09 crc kubenswrapper[4716]: I1209 15:39:09.999001 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-catalog-content\") pod \"redhat-operators-brx57\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.021823 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvndw\" (UniqueName: \"kubernetes.io/projected/2fda88cc-680a-4a33-a9ea-547cf12e50d9-kube-api-access-pvndw\") pod \"redhat-operators-brx57\" (UID: 
\"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.076928 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.390493 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4z8zx"] Dec 09 15:39:10 crc kubenswrapper[4716]: W1209 15:39:10.408579 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod382d6c66_2bf2_4240_99ca_b71b255a07c3.slice/crio-9c331c4108dd635c0ffabb56fc2bb54218ce18bf0d9b4dae544e537b9bb2d00c WatchSource:0}: Error finding container 9c331c4108dd635c0ffabb56fc2bb54218ce18bf0d9b4dae544e537b9bb2d00c: Status 404 returned error can't find the container with id 9c331c4108dd635c0ffabb56fc2bb54218ce18bf0d9b4dae544e537b9bb2d00c Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.619065 4716 scope.go:117] "RemoveContainer" containerID="144be49e7153729fc6b17d2974c938a069e5b19748948b71728ee1a36e2b52a0" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.645501 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.693433 4716 scope.go:117] "RemoveContainer" containerID="dff61c3808e480eb5344a9679d346371a6ed83e46e414d3b1288cb4d60a7b29a" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.738769 4716 scope.go:117] "RemoveContainer" containerID="e0b3bce6e4f911d56460e7e6c57e92cb0818375ba9c445e1cb2d33a6fcc8253c" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.770130 4716 scope.go:117] "RemoveContainer" containerID="c2fa73a1329bbedc77ddddf004e0c6c1f8f493e99de0f806acb9cf4f307e172c" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.809024 4716 scope.go:117] "RemoveContainer" containerID="f9b61364f0ef1deff6be397a1c18bbaf24bc9a0bbec192df0cc269de6d21535b" Dec 09 15:39:10 crc kubenswrapper[4716]: I1209 15:39:10.908520 4716 scope.go:117] "RemoveContainer" containerID="d6a42dcf537008f4f39a0f551b4a0d479fc86d2c5cf5f63a92c092513609ccf6" Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.022576 4716 generic.go:334] "Generic (PLEG): container finished" podID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerID="ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a" exitCode=0 Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.022690 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerDied","Data":"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a"} Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.022763 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerStarted","Data":"70840a0aa54a94a9e9d1ed7c86b41150eb075d7eabf0349374dae35a350d0ec2"} Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.027885 4716 generic.go:334] "Generic (PLEG): container finished" podID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerID="e9d1a0deadf1c188849bf75839b0bbefff8b2479effa195c6f8949b4891906bf" exitCode=0 Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.027949 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerDied","Data":"e9d1a0deadf1c188849bf75839b0bbefff8b2479effa195c6f8949b4891906bf"} Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.028004 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerStarted","Data":"9c331c4108dd635c0ffabb56fc2bb54218ce18bf0d9b4dae544e537b9bb2d00c"} Dec 09 15:39:11 crc kubenswrapper[4716]: E1209 15:39:11.216410 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.946019 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w4kqs"] Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.951234 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:11 crc kubenswrapper[4716]: I1209 15:39:11.973299 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w4kqs"] Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.045253 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerStarted","Data":"53dcab9f1a907a17398f73489b77555030ba22d774eca2807c6e6025b2f0db22"} Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.055900 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhc2c\" (UniqueName: \"kubernetes.io/projected/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-kube-api-access-vhc2c\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.056095 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-utilities\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.056256 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-catalog-content\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.159358 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhc2c\" (UniqueName: \"kubernetes.io/projected/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-kube-api-access-vhc2c\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.159542 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-utilities\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.159703 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-catalog-content\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.160087 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-utilities\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.160268 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-catalog-content\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.179126 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhc2c\" (UniqueName: \"kubernetes.io/projected/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-kube-api-access-vhc2c\") pod \"certified-operators-w4kqs\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.297699 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:12 crc kubenswrapper[4716]: I1209 15:39:12.994756 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w4kqs"] Dec 09 15:39:13 crc kubenswrapper[4716]: I1209 15:39:13.068864 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerStarted","Data":"f4ebb488ee83306c72f75fd0c4bd26eb361e14de48cf81f9ee50c5d9831bd7da"} Dec 09 15:39:14 crc kubenswrapper[4716]: I1209 15:39:14.091401 4716 generic.go:334] "Generic (PLEG): container finished" podID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerID="53dcab9f1a907a17398f73489b77555030ba22d774eca2807c6e6025b2f0db22" exitCode=0 Dec 09 15:39:14 crc kubenswrapper[4716]: I1209 15:39:14.091493 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerDied","Data":"53dcab9f1a907a17398f73489b77555030ba22d774eca2807c6e6025b2f0db22"} Dec 09 15:39:14 crc kubenswrapper[4716]: I1209 15:39:14.100328 4716 generic.go:334] "Generic (PLEG): container finished" podID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerID="21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07" exitCode=0 Dec 09 15:39:14 crc kubenswrapper[4716]: I1209 15:39:14.100391 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerDied","Data":"21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07"} Dec 09 15:39:15 crc kubenswrapper[4716]: I1209 15:39:15.121899 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerStarted","Data":"c3abb4fb2138e66cb40c0a723ec1f855503c1bb724d38f87c66fab463f516995"} Dec 09 15:39:15 crc kubenswrapper[4716]: I1209 15:39:15.125498 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerStarted","Data":"0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3"} Dec 09 15:39:15 crc kubenswrapper[4716]: I1209 15:39:15.150652 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4z8zx" podStartSLOduration=2.6599256540000003 podStartE2EDuration="6.150609062s" podCreationTimestamp="2025-12-09 15:39:09 +0000 UTC" firstStartedPulling="2025-12-09 15:39:11.035521651 +0000 UTC m=+1838.190265639" lastFinishedPulling="2025-12-09 15:39:14.526205059 +0000 UTC m=+1841.680949047" observedRunningTime="2025-12-09 15:39:15.145206959 +0000 UTC m=+1842.299950967" watchObservedRunningTime="2025-12-09 15:39:15.150609062 +0000 UTC m=+1842.305353050" Dec 09 15:39:17 crc kubenswrapper[4716]: I1209 15:39:17.188951 4716 generic.go:334] "Generic (PLEG): container finished" podID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerID="0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3" exitCode=0 Dec 09 15:39:17 crc kubenswrapper[4716]: I1209 15:39:17.189117 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" 
event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerDied","Data":"0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3"} Dec 09 15:39:17 crc kubenswrapper[4716]: E1209 15:39:17.217732 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:39:19 crc kubenswrapper[4716]: I1209 15:39:19.880869 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:19 crc kubenswrapper[4716]: I1209 15:39:19.881406 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:20 crc kubenswrapper[4716]: I1209 15:39:20.931407 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4z8zx" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="registry-server" probeResult="failure" output=< Dec 09 15:39:20 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:39:20 crc kubenswrapper[4716]: > Dec 09 15:39:22 crc kubenswrapper[4716]: I1209 15:39:22.214702 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:39:22 crc kubenswrapper[4716]: E1209 15:39:22.215094 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:39:22 crc kubenswrapper[4716]: E1209 15:39:22.216859 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:39:23 crc kubenswrapper[4716]: I1209 15:39:23.268342 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerStarted","Data":"ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c"} Dec 09 15:39:24 crc kubenswrapper[4716]: I1209 15:39:24.283042 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerStarted","Data":"e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968"} Dec 09 15:39:24 crc kubenswrapper[4716]: I1209 15:39:24.310812 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w4kqs" podStartSLOduration=4.545682696 podStartE2EDuration="13.310784436s" podCreationTimestamp="2025-12-09 15:39:11 +0000 UTC" firstStartedPulling="2025-12-09 15:39:14.102884224 +0000 UTC m=+1841.257628212" lastFinishedPulling="2025-12-09 15:39:22.867985964 +0000 UTC m=+1850.022729952" 
observedRunningTime="2025-12-09 15:39:24.299036073 +0000 UTC m=+1851.453780071" watchObservedRunningTime="2025-12-09 15:39:24.310784436 +0000 UTC m=+1851.465528424" Dec 09 15:39:29 crc kubenswrapper[4716]: I1209 15:39:29.380868 4716 generic.go:334] "Generic (PLEG): container finished" podID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerID="ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c" exitCode=0 Dec 09 15:39:29 crc kubenswrapper[4716]: I1209 15:39:29.381290 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerDied","Data":"ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c"} Dec 09 15:39:29 crc kubenswrapper[4716]: I1209 15:39:29.947811 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:30 crc kubenswrapper[4716]: I1209 15:39:30.004919 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:30 crc kubenswrapper[4716]: I1209 15:39:30.398303 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerStarted","Data":"1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32"} Dec 09 15:39:30 crc kubenswrapper[4716]: I1209 15:39:30.450474 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-brx57" podStartSLOduration=2.462940421 podStartE2EDuration="21.450450534s" podCreationTimestamp="2025-12-09 15:39:09 +0000 UTC" firstStartedPulling="2025-12-09 15:39:11.0312677 +0000 UTC m=+1838.186011688" lastFinishedPulling="2025-12-09 15:39:30.018777813 +0000 UTC m=+1857.173521801" observedRunningTime="2025-12-09 15:39:30.442405296 +0000 UTC m=+1857.597149284" watchObservedRunningTime="2025-12-09 15:39:30.450450534 +0000 UTC m=+1857.605194522" Dec 09 15:39:30 crc kubenswrapper[4716]: I1209 15:39:30.627712 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4z8zx"] Dec 09 15:39:31 crc kubenswrapper[4716]: E1209 15:39:31.216578 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:39:31 crc kubenswrapper[4716]: I1209 15:39:31.409559 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4z8zx" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="registry-server" containerID="cri-o://c3abb4fb2138e66cb40c0a723ec1f855503c1bb724d38f87c66fab463f516995" gracePeriod=2 Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.298346 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.298641 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.430773 4716 generic.go:334] "Generic (PLEG): container finished" 
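The startup-probe failures above ("timeout: failed to connect service \":50051\" within 1s") mean the registry-server's gRPC port was not yet accepting connections while the catalog container was still unpacking its content; once extraction finished, the probe flipped to "started" and the pod went ready. The connection step of that probe can be mimicked with a plain TCP dial under the same one-second budget. This Go sketch stops at the connect step; the real probe additionally issues a gRPC health-check RPC.

    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	// Dial the registry-server gRPC port with the probe's 1s budget.
    	// "localhost" is a stand-in; the kubelet dials the pod's own address.
    	conn, err := net.DialTimeout("tcp", "localhost:50051", time.Second)
    	if err != nil {
    		fmt.Println("probe connect step fails:", err)
    		return
    	}
    	conn.Close()
    	fmt.Println("probe connect step passes: port is accepting connections")
    }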
podID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerID="c3abb4fb2138e66cb40c0a723ec1f855503c1bb724d38f87c66fab463f516995" exitCode=0 Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.430958 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerDied","Data":"c3abb4fb2138e66cb40c0a723ec1f855503c1bb724d38f87c66fab463f516995"} Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.431101 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4z8zx" event={"ID":"382d6c66-2bf2-4240-99ca-b71b255a07c3","Type":"ContainerDied","Data":"9c331c4108dd635c0ffabb56fc2bb54218ce18bf0d9b4dae544e537b9bb2d00c"} Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.431119 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c331c4108dd635c0ffabb56fc2bb54218ce18bf0d9b4dae544e537b9bb2d00c" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.492207 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.563816 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-catalog-content\") pod \"382d6c66-2bf2-4240-99ca-b71b255a07c3\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.563897 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/382d6c66-2bf2-4240-99ca-b71b255a07c3-kube-api-access-5xtxs\") pod \"382d6c66-2bf2-4240-99ca-b71b255a07c3\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.564342 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-utilities\") pod \"382d6c66-2bf2-4240-99ca-b71b255a07c3\" (UID: \"382d6c66-2bf2-4240-99ca-b71b255a07c3\") " Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.565004 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-utilities" (OuterVolumeSpecName: "utilities") pod "382d6c66-2bf2-4240-99ca-b71b255a07c3" (UID: "382d6c66-2bf2-4240-99ca-b71b255a07c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.565777 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.581651 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/382d6c66-2bf2-4240-99ca-b71b255a07c3-kube-api-access-5xtxs" (OuterVolumeSpecName: "kube-api-access-5xtxs") pod "382d6c66-2bf2-4240-99ca-b71b255a07c3" (UID: "382d6c66-2bf2-4240-99ca-b71b255a07c3"). InnerVolumeSpecName "kube-api-access-5xtxs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.618746 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "382d6c66-2bf2-4240-99ca-b71b255a07c3" (UID: "382d6c66-2bf2-4240-99ca-b71b255a07c3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.668014 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/382d6c66-2bf2-4240-99ca-b71b255a07c3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:32 crc kubenswrapper[4716]: I1209 15:39:32.668403 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xtxs\" (UniqueName: \"kubernetes.io/projected/382d6c66-2bf2-4240-99ca-b71b255a07c3-kube-api-access-5xtxs\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:33 crc kubenswrapper[4716]: I1209 15:39:33.360297 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-w4kqs" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="registry-server" probeResult="failure" output=< Dec 09 15:39:33 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:39:33 crc kubenswrapper[4716]: > Dec 09 15:39:33 crc kubenswrapper[4716]: I1209 15:39:33.443049 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4z8zx" Dec 09 15:39:33 crc kubenswrapper[4716]: I1209 15:39:33.468250 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4z8zx"] Dec 09 15:39:33 crc kubenswrapper[4716]: I1209 15:39:33.484119 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4z8zx"] Dec 09 15:39:34 crc kubenswrapper[4716]: I1209 15:39:34.214148 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:39:34 crc kubenswrapper[4716]: E1209 15:39:34.214584 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:39:35 crc kubenswrapper[4716]: I1209 15:39:35.229201 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" path="/var/lib/kubelet/pods/382d6c66-2bf2-4240-99ca-b71b255a07c3/volumes" Dec 09 15:39:36 crc kubenswrapper[4716]: E1209 15:39:36.217281 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:39:40 crc kubenswrapper[4716]: I1209 15:39:40.077214 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:40 crc 
kubenswrapper[4716]: I1209 15:39:40.077848 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:41 crc kubenswrapper[4716]: I1209 15:39:41.126371 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-brx57" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="registry-server" probeResult="failure" output=< Dec 09 15:39:41 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:39:41 crc kubenswrapper[4716]: > Dec 09 15:39:42 crc kubenswrapper[4716]: I1209 15:39:42.383591 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:42 crc kubenswrapper[4716]: I1209 15:39:42.439406 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:43 crc kubenswrapper[4716]: I1209 15:39:43.141088 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w4kqs"] Dec 09 15:39:43 crc kubenswrapper[4716]: I1209 15:39:43.574478 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w4kqs" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="registry-server" containerID="cri-o://e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968" gracePeriod=2 Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.067957 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.161436 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhc2c\" (UniqueName: \"kubernetes.io/projected/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-kube-api-access-vhc2c\") pod \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.161734 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-utilities\") pod \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.161771 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-catalog-content\") pod \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\" (UID: \"58978f24-d8f6-46f2-bb16-d0bb52c93b2a\") " Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.162382 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-utilities" (OuterVolumeSpecName: "utilities") pod "58978f24-d8f6-46f2-bb16-d0bb52c93b2a" (UID: "58978f24-d8f6-46f2-bb16-d0bb52c93b2a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.167375 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-kube-api-access-vhc2c" (OuterVolumeSpecName: "kube-api-access-vhc2c") pod "58978f24-d8f6-46f2-bb16-d0bb52c93b2a" (UID: "58978f24-d8f6-46f2-bb16-d0bb52c93b2a"). InnerVolumeSpecName "kube-api-access-vhc2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.216199 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58978f24-d8f6-46f2-bb16-d0bb52c93b2a" (UID: "58978f24-d8f6-46f2-bb16-d0bb52c93b2a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.265712 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhc2c\" (UniqueName: \"kubernetes.io/projected/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-kube-api-access-vhc2c\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.265759 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.265773 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58978f24-d8f6-46f2-bb16-d0bb52c93b2a-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.586919 4716 generic.go:334] "Generic (PLEG): container finished" podID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerID="e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968" exitCode=0 Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.586964 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerDied","Data":"e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968"} Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.586994 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4kqs" event={"ID":"58978f24-d8f6-46f2-bb16-d0bb52c93b2a","Type":"ContainerDied","Data":"f4ebb488ee83306c72f75fd0c4bd26eb361e14de48cf81f9ee50c5d9831bd7da"} Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.587005 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w4kqs" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.587012 4716 scope.go:117] "RemoveContainer" containerID="e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.618112 4716 scope.go:117] "RemoveContainer" containerID="0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.634882 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w4kqs"] Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.642683 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w4kqs"] Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.750537 4716 scope.go:117] "RemoveContainer" containerID="21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.837821 4716 scope.go:117] "RemoveContainer" containerID="e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968" Dec 09 15:39:44 crc kubenswrapper[4716]: E1209 15:39:44.841778 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968\": container with ID starting with e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968 not found: ID does not exist" containerID="e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.841834 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968"} err="failed to get container status \"e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968\": rpc error: code = NotFound desc = could not find container \"e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968\": container with ID starting with e751bdd5f9c7e8b0de53cbb8a9e538eec886e5868136d29b153756b0f42fe968 not found: ID does not exist" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.841867 4716 scope.go:117] "RemoveContainer" containerID="0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3" Dec 09 15:39:44 crc kubenswrapper[4716]: E1209 15:39:44.849820 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3\": container with ID starting with 0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3 not found: ID does not exist" containerID="0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.849878 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3"} err="failed to get container status \"0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3\": rpc error: code = NotFound desc = could not find container \"0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3\": container with ID starting with 0491055e3030f76990a631c323399a51bb91174310f23d40ab015a9b35cf8cd3 not found: ID does not exist" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.849915 4716 scope.go:117] "RemoveContainer" 
containerID="21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07" Dec 09 15:39:44 crc kubenswrapper[4716]: E1209 15:39:44.854659 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07\": container with ID starting with 21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07 not found: ID does not exist" containerID="21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07" Dec 09 15:39:44 crc kubenswrapper[4716]: I1209 15:39:44.854714 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07"} err="failed to get container status \"21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07\": rpc error: code = NotFound desc = could not find container \"21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07\": container with ID starting with 21ef2473fb186cbdb7dcba905e00fe01218d5c995a15715b2fbe828038227a07 not found: ID does not exist" Dec 09 15:39:45 crc kubenswrapper[4716]: I1209 15:39:45.227674 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" path="/var/lib/kubelet/pods/58978f24-d8f6-46f2-bb16-d0bb52c93b2a/volumes" Dec 09 15:39:46 crc kubenswrapper[4716]: E1209 15:39:46.217514 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:39:48 crc kubenswrapper[4716]: I1209 15:39:48.213403 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:39:48 crc kubenswrapper[4716]: E1209 15:39:48.213965 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.135464 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.190998 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 15:39:50 crc kubenswrapper[4716]: E1209 15:39:50.216264 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.267402 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.380205 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-9z9z6"] Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.380821 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9z9z6" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="registry-server" containerID="cri-o://a2a58763f765477117edd588ce35afb6e4fa9051ec45d86d1bc2dd658468b41a" gracePeriod=2 Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.654442 4716 generic.go:334] "Generic (PLEG): container finished" podID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerID="a2a58763f765477117edd588ce35afb6e4fa9051ec45d86d1bc2dd658468b41a" exitCode=0 Dec 09 15:39:50 crc kubenswrapper[4716]: I1209 15:39:50.654508 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerDied","Data":"a2a58763f765477117edd588ce35afb6e4fa9051ec45d86d1bc2dd658468b41a"} Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.506601 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9z9z6" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.557126 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-utilities\") pod \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.557188 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-catalog-content\") pod \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.557304 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxpzk\" (UniqueName: \"kubernetes.io/projected/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-kube-api-access-zxpzk\") pod \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\" (UID: \"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922\") " Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.558483 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-utilities" (OuterVolumeSpecName: "utilities") pod "ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" (UID: "ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.558900 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.567392 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-kube-api-access-zxpzk" (OuterVolumeSpecName: "kube-api-access-zxpzk") pod "ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" (UID: "ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922"). InnerVolumeSpecName "kube-api-access-zxpzk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.661112 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxpzk\" (UniqueName: \"kubernetes.io/projected/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-kube-api-access-zxpzk\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.669741 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9z9z6" event={"ID":"ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922","Type":"ContainerDied","Data":"262ff02b228ff0d90442f27669a7e58fdd3c35b4f2506d68ee31e11bd419c60c"} Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.669812 4716 scope.go:117] "RemoveContainer" containerID="a2a58763f765477117edd588ce35afb6e4fa9051ec45d86d1bc2dd658468b41a" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.669764 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9z9z6" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.726079 4716 scope.go:117] "RemoveContainer" containerID="b5125c76d8b7c791be100ba5dbd812822be385265dac3d2c438b9c1da1e9371b" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.728895 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" (UID: "ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.765023 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:39:51 crc kubenswrapper[4716]: I1209 15:39:51.777951 4716 scope.go:117] "RemoveContainer" containerID="1dee59dc4d865801a95ef3fadf2b870d4d51760b563ddaa653f07d2141af993d" Dec 09 15:39:52 crc kubenswrapper[4716]: I1209 15:39:52.014346 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9z9z6"] Dec 09 15:39:52 crc kubenswrapper[4716]: I1209 15:39:52.027327 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9z9z6"] Dec 09 15:39:53 crc kubenswrapper[4716]: I1209 15:39:53.226547 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" path="/var/lib/kubelet/pods/ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922/volumes" Dec 09 15:40:01 crc kubenswrapper[4716]: I1209 15:40:01.214178 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:40:01 crc kubenswrapper[4716]: E1209 15:40:01.215125 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:40:01 crc kubenswrapper[4716]: E1209 15:40:01.215957 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:40:05 crc kubenswrapper[4716]: E1209 15:40:05.216969 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:40:12 crc kubenswrapper[4716]: I1209 15:40:12.214378 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:40:12 crc kubenswrapper[4716]: E1209 15:40:12.215416 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:40:12 crc kubenswrapper[4716]: E1209 15:40:12.346477 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:40:12 crc kubenswrapper[4716]: E1209 15:40:12.346546 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:40:12 crc kubenswrapper[4716]: E1209 15:40:12.346816 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:40:12 crc kubenswrapper[4716]: E1209 15:40:12.348104 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:40:20 crc kubenswrapper[4716]: E1209 15:40:20.340210 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:40:20 crc kubenswrapper[4716]: E1209 15:40:20.340610 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:40:20 crc kubenswrapper[4716]: E1209 15:40:20.340772 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:40:20 crc kubenswrapper[4716]: E1209 15:40:20.341967 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:40:25 crc kubenswrapper[4716]: E1209 15:40:25.216362 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:40:27 crc kubenswrapper[4716]: I1209 15:40:27.214639 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:40:27 crc kubenswrapper[4716]: E1209 15:40:27.215181 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:40:36 crc kubenswrapper[4716]: E1209 15:40:36.216372 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:40:38 crc kubenswrapper[4716]: I1209 15:40:38.213850 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:40:38 crc kubenswrapper[4716]: E1209 
15:40:38.214513 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:40:40 crc kubenswrapper[4716]: E1209 15:40:40.216122 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:40:49 crc kubenswrapper[4716]: E1209 15:40:49.217187 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:40:53 crc kubenswrapper[4716]: I1209 15:40:53.223476 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:40:53 crc kubenswrapper[4716]: E1209 15:40:53.224852 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:40:53 crc kubenswrapper[4716]: E1209 15:40:53.229764 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:41:04 crc kubenswrapper[4716]: E1209 15:41:04.216339 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:41:06 crc kubenswrapper[4716]: I1209 15:41:06.214593 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:41:06 crc kubenswrapper[4716]: E1209 15:41:06.215247 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:41:07 crc kubenswrapper[4716]: E1209 15:41:07.215896 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:41:11 crc kubenswrapper[4716]: I1209 15:41:11.174241 4716 scope.go:117] "RemoveContainer" containerID="298e67967e2357f11792268f36281a94f3e3f3ed23603d05f717188970b1c1ce" Dec 09 15:41:15 crc kubenswrapper[4716]: E1209 15:41:15.217172 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:41:18 crc kubenswrapper[4716]: I1209 15:41:18.214179 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:41:18 crc kubenswrapper[4716]: E1209 15:41:18.214988 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:41:21 crc kubenswrapper[4716]: E1209 15:41:21.217101 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:41:28 crc kubenswrapper[4716]: E1209 15:41:28.216215 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:41:30 crc kubenswrapper[4716]: I1209 15:41:30.053497 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0e28-account-create-update-xwvcx"] Dec 09 15:41:30 crc kubenswrapper[4716]: I1209 15:41:30.069279 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-rchbw"] Dec 09 15:41:30 crc kubenswrapper[4716]: I1209 15:41:30.081403 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0e28-account-create-update-xwvcx"] Dec 09 15:41:30 crc kubenswrapper[4716]: I1209 15:41:30.092737 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-rchbw"] Dec 09 15:41:31 crc kubenswrapper[4716]: I1209 15:41:31.231038 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20c9ed62-b26d-4c7e-b737-057d5afda2da" path="/var/lib/kubelet/pods/20c9ed62-b26d-4c7e-b737-057d5afda2da/volumes" Dec 09 15:41:31 crc kubenswrapper[4716]: I1209 15:41:31.232495 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b12de1dd-ac2d-4a05-af7d-f675b007109d" path="/var/lib/kubelet/pods/b12de1dd-ac2d-4a05-af7d-f675b007109d/volumes" Dec 09 15:41:32 crc 
kubenswrapper[4716]: I1209 15:41:32.214851 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:41:32 crc kubenswrapper[4716]: E1209 15:41:32.215413 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:41:33 crc kubenswrapper[4716]: I1209 15:41:33.030741 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-qnztr"] Dec 09 15:41:33 crc kubenswrapper[4716]: I1209 15:41:33.045117 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-qnztr"] Dec 09 15:41:33 crc kubenswrapper[4716]: I1209 15:41:33.229290 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95ca86cf-1004-4d90-8ed7-c5e8277f2f84" path="/var/lib/kubelet/pods/95ca86cf-1004-4d90-8ed7-c5e8277f2f84/volumes" Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.042549 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-wgl8z"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.059444 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-83bd-account-create-update-xs2rb"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.071404 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-zftdd"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.082890 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-a78b-account-create-update-mhkd2"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.094207 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-zftdd"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.104987 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-10b6-account-create-update-gnpg9"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.115644 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-wgl8z"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.126405 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-83bd-account-create-update-xs2rb"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.136992 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-a78b-account-create-update-mhkd2"] Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.147416 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-10b6-account-create-update-gnpg9"] Dec 09 15:41:35 crc kubenswrapper[4716]: E1209 15:41:35.217241 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.228984 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f8150e8-0d21-4e44-bfda-32a724caf6ad" 
path="/var/lib/kubelet/pods/0f8150e8-0d21-4e44-bfda-32a724caf6ad/volumes" Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.230579 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b147869-4532-4858-afbe-5280be85584a" path="/var/lib/kubelet/pods/7b147869-4532-4858-afbe-5280be85584a/volumes" Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.233026 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3dcd346-c1af-49fd-91f2-4ff1d1d3329b" path="/var/lib/kubelet/pods/d3dcd346-c1af-49fd-91f2-4ff1d1d3329b/volumes" Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.234566 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e36549f8-8725-4e36-840b-1bdaa80c2e52" path="/var/lib/kubelet/pods/e36549f8-8725-4e36-840b-1bdaa80c2e52/volumes" Dec 09 15:41:35 crc kubenswrapper[4716]: I1209 15:41:35.235730 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eed32c8a-48cc-4162-93b6-7830c131a586" path="/var/lib/kubelet/pods/eed32c8a-48cc-4162-93b6-7830c131a586/volumes" Dec 09 15:41:38 crc kubenswrapper[4716]: I1209 15:41:38.029928 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv"] Dec 09 15:41:38 crc kubenswrapper[4716]: I1209 15:41:38.060885 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-6090-account-create-update-pqqf4"] Dec 09 15:41:38 crc kubenswrapper[4716]: I1209 15:41:38.075846 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-6090-account-create-update-pqqf4"] Dec 09 15:41:38 crc kubenswrapper[4716]: I1209 15:41:38.094178 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jwqjv"] Dec 09 15:41:39 crc kubenswrapper[4716]: E1209 15:41:39.215690 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:41:39 crc kubenswrapper[4716]: I1209 15:41:39.228991 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0529dfa0-1e8c-4112-af01-c03890a167cd" path="/var/lib/kubelet/pods/0529dfa0-1e8c-4112-af01-c03890a167cd/volumes" Dec 09 15:41:39 crc kubenswrapper[4716]: I1209 15:41:39.230223 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac603427-aaf7-459a-b6bf-11fd5926113b" path="/var/lib/kubelet/pods/ac603427-aaf7-459a-b6bf-11fd5926113b/volumes" Dec 09 15:41:46 crc kubenswrapper[4716]: I1209 15:41:46.214055 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:41:46 crc kubenswrapper[4716]: E1209 15:41:46.215227 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:41:48 crc kubenswrapper[4716]: E1209 15:41:48.215684 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:41:54 crc kubenswrapper[4716]: E1209 15:41:54.217054 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:41:59 crc kubenswrapper[4716]: I1209 15:41:59.215310 4716 scope.go:117] "RemoveContainer" containerID="f23d8f973b0c391889ecea36a999ba03ea85dda0d274716304ac66db87b6b8dc" Dec 09 15:42:00 crc kubenswrapper[4716]: I1209 15:42:00.176176 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"7bc95cea1510600028ea0972ceb137a19bfc71882cae48ac9032c1a550a517f4"} Dec 09 15:42:01 crc kubenswrapper[4716]: E1209 15:42:01.216896 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:42:02 crc kubenswrapper[4716]: I1209 15:42:02.198138 4716 generic.go:334] "Generic (PLEG): container finished" podID="7a03690d-fbfb-4a36-9b27-ca857fcc88f1" containerID="30acac6410b37ffa2afc9776eb9387a1fe379d293eb163f03ed2379d8fa8eb21" exitCode=0 Dec 09 15:42:02 crc kubenswrapper[4716]: I1209 15:42:02.198251 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" event={"ID":"7a03690d-fbfb-4a36-9b27-ca857fcc88f1","Type":"ContainerDied","Data":"30acac6410b37ffa2afc9776eb9387a1fe379d293eb163f03ed2379d8fa8eb21"} Dec 09 15:42:03 crc kubenswrapper[4716]: I1209 15:42:03.886472 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:42:03 crc kubenswrapper[4716]: I1209 15:42:03.995977 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-inventory\") pod \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " Dec 09 15:42:03 crc kubenswrapper[4716]: I1209 15:42:03.996089 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-th257\" (UniqueName: \"kubernetes.io/projected/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-kube-api-access-th257\") pod \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " Dec 09 15:42:03 crc kubenswrapper[4716]: I1209 15:42:03.996144 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-ssh-key\") pod \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " Dec 09 15:42:03 crc kubenswrapper[4716]: I1209 15:42:03.996183 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-bootstrap-combined-ca-bundle\") pod \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\" (UID: \"7a03690d-fbfb-4a36-9b27-ca857fcc88f1\") " Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.003150 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7a03690d-fbfb-4a36-9b27-ca857fcc88f1" (UID: "7a03690d-fbfb-4a36-9b27-ca857fcc88f1"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.003636 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-kube-api-access-th257" (OuterVolumeSpecName: "kube-api-access-th257") pod "7a03690d-fbfb-4a36-9b27-ca857fcc88f1" (UID: "7a03690d-fbfb-4a36-9b27-ca857fcc88f1"). InnerVolumeSpecName "kube-api-access-th257". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.032494 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-inventory" (OuterVolumeSpecName: "inventory") pod "7a03690d-fbfb-4a36-9b27-ca857fcc88f1" (UID: "7a03690d-fbfb-4a36-9b27-ca857fcc88f1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.039417 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7a03690d-fbfb-4a36-9b27-ca857fcc88f1" (UID: "7a03690d-fbfb-4a36-9b27-ca857fcc88f1"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.099090 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.099132 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-th257\" (UniqueName: \"kubernetes.io/projected/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-kube-api-access-th257\") on node \"crc\" DevicePath \"\"" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.099143 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.099151 4716 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a03690d-fbfb-4a36-9b27-ca857fcc88f1-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.276929 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" event={"ID":"7a03690d-fbfb-4a36-9b27-ca857fcc88f1","Type":"ContainerDied","Data":"a39000f89ba1b6c2d7151d9f5bdb83a7dcf5570c8e5066df9855cd9cea5e1a97"} Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.276977 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a39000f89ba1b6c2d7151d9f5bdb83a7dcf5570c8e5066df9855cd9cea5e1a97" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.277040 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.327502 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx"] Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328127 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="extract-content" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328155 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="extract-content" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328177 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328185 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328196 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a03690d-fbfb-4a36-9b27-ca857fcc88f1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328203 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a03690d-fbfb-4a36-9b27-ca857fcc88f1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328214 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="extract-utilities" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328220 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="extract-utilities" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328237 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328243 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328264 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="extract-content" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328271 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="extract-content" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328280 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="extract-utilities" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328288 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="extract-utilities" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328301 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328307 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="registry-server" Dec 09 15:42:04 crc 
kubenswrapper[4716]: E1209 15:42:04.328325 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="extract-utilities" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328331 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="extract-utilities" Dec 09 15:42:04 crc kubenswrapper[4716]: E1209 15:42:04.328352 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="extract-content" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328358 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="extract-content" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328600 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab67bf52-ab18-4d0d-b10a-1ea9b9aa4922" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328632 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="58978f24-d8f6-46f2-bb16-d0bb52c93b2a" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328651 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="382d6c66-2bf2-4240-99ca-b71b255a07c3" containerName="registry-server" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.328681 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a03690d-fbfb-4a36-9b27-ca857fcc88f1" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.329734 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.331528 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.332609 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.333600 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.336179 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.343903 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx"] Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.406896 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.406956 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: 
\"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.407272 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkc5s\" (UniqueName: \"kubernetes.io/projected/56391e5b-15d4-4f61-9d57-6512b28a5254-kube-api-access-lkc5s\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.509585 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.510595 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.510758 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkc5s\" (UniqueName: \"kubernetes.io/projected/56391e5b-15d4-4f61-9d57-6512b28a5254-kube-api-access-lkc5s\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.515572 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.515587 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.530044 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkc5s\" (UniqueName: \"kubernetes.io/projected/56391e5b-15d4-4f61-9d57-6512b28a5254-kube-api-access-lkc5s\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-b8znx\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:04 crc kubenswrapper[4716]: I1209 15:42:04.676647 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:42:05 crc kubenswrapper[4716]: I1209 15:42:05.355927 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx"] Dec 09 15:42:05 crc kubenswrapper[4716]: I1209 15:42:05.370122 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:42:06 crc kubenswrapper[4716]: E1209 15:42:06.215069 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:42:06 crc kubenswrapper[4716]: I1209 15:42:06.302738 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" event={"ID":"56391e5b-15d4-4f61-9d57-6512b28a5254","Type":"ContainerStarted","Data":"8799b9a1e5f464d56f64a5b0d0d632300120111a75c01e42ccf1b6be90a4c350"} Dec 09 15:42:06 crc kubenswrapper[4716]: I1209 15:42:06.303030 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" event={"ID":"56391e5b-15d4-4f61-9d57-6512b28a5254","Type":"ContainerStarted","Data":"603461ca11ff1f8948de5a56b8920be8fa23f8d75403e5c6510854fd878cd208"} Dec 09 15:42:06 crc kubenswrapper[4716]: I1209 15:42:06.334694 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" podStartSLOduration=1.965421857 podStartE2EDuration="2.334666374s" podCreationTimestamp="2025-12-09 15:42:04 +0000 UTC" firstStartedPulling="2025-12-09 15:42:05.369750985 +0000 UTC m=+2012.524494973" lastFinishedPulling="2025-12-09 15:42:05.738995512 +0000 UTC m=+2012.893739490" observedRunningTime="2025-12-09 15:42:06.321772186 +0000 UTC m=+2013.476516174" watchObservedRunningTime="2025-12-09 15:42:06.334666374 +0000 UTC m=+2013.489410362" Dec 09 15:42:07 crc kubenswrapper[4716]: I1209 15:42:07.059278 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-9v9w2"] Dec 09 15:42:07 crc kubenswrapper[4716]: I1209 15:42:07.071211 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-9v9w2"] Dec 09 15:42:07 crc kubenswrapper[4716]: I1209 15:42:07.227731 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45c934d3-830f-4a67-ae5f-cd703dbed98c" path="/var/lib/kubelet/pods/45c934d3-830f-4a67-ae5f-cd703dbed98c/volumes" Dec 09 15:42:09 crc kubenswrapper[4716]: I1209 15:42:09.030850 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-62b3-account-create-update-nwf77"] Dec 09 15:42:09 crc kubenswrapper[4716]: I1209 15:42:09.045908 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-62b3-account-create-update-nwf77"] Dec 09 15:42:09 crc kubenswrapper[4716]: I1209 15:42:09.227491 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9559f453-f355-4947-9078-0dc08b47d647" path="/var/lib/kubelet/pods/9559f453-f355-4947-9078-0dc08b47d647/volumes" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.256589 4716 scope.go:117] "RemoveContainer" containerID="3fd51d0e4508bfba720c7770bc6625dcaa3c85f3d97fff746c8db2339ec160ad" Dec 
09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.283473 4716 scope.go:117] "RemoveContainer" containerID="f853614d4a6d711d88bcc719f44903caefb4a2897b919bb3037953a2ce6fb047" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.365824 4716 scope.go:117] "RemoveContainer" containerID="b1ddc6c27910cc1411c865b8b2c0e8b0f224d5df32b9ab97972b290f8f266983" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.411959 4716 scope.go:117] "RemoveContainer" containerID="858976ae7428005951774bd37e3d75762c1000efe5152e7e5b5133ab6b02ad31" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.438056 4716 scope.go:117] "RemoveContainer" containerID="197798eebb7dc48dc6e78ccd6cb07a25c1391dd979725dc192380f71455fbda8" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.495783 4716 scope.go:117] "RemoveContainer" containerID="1634feab212b94e9f8e253b1f32db52e3800581b526ffe3408c569c83c98a038" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.549016 4716 scope.go:117] "RemoveContainer" containerID="3fda476c8fc95d2325720e50f8dfdea3525cb3be15bd404b76844562f3c648c9" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.606270 4716 scope.go:117] "RemoveContainer" containerID="1bed3efe42d3cd6befc635ecdeadeb81c22f11f9b841434238e434fe8690e559" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.630016 4716 scope.go:117] "RemoveContainer" containerID="2e9050cfa87e8b69d7fb47e9180d456507eb680327214b0c7a0db3b4b458aa24" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.653128 4716 scope.go:117] "RemoveContainer" containerID="c473a760322080d2abe30272df5e8cc64353825f9a1e2fa7f97a0cd8aed3def2" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.680514 4716 scope.go:117] "RemoveContainer" containerID="130427c60649e5cd4be163e1625b38d357f73119d3cc0503d072756d878970b7" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.704247 4716 scope.go:117] "RemoveContainer" containerID="5ae6cd3f51234d7418529cd76d81323bd7cd2f60177378f4732707c4e95c821a" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.733147 4716 scope.go:117] "RemoveContainer" containerID="a0a47b3b97966f336ea916ba548a6c11cba9f183ad4c6993f1f19ee1604dcfbe" Dec 09 15:42:11 crc kubenswrapper[4716]: I1209 15:42:11.758120 4716 scope.go:117] "RemoveContainer" containerID="c8ab4fb3eb7c43e6529b4c02e78ed1d12b73bf9bc71bc42ea8bd35357a1d4e87" Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.078050 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-xsvm8"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.099145 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d961-account-create-update-jk7hm"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.122312 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-bde5-account-create-update-jtrwz"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.134927 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-xsvm8"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.148971 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3ed5-account-create-update-c4v8k"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.167292 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-mg54n"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.185181 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d961-account-create-update-jk7hm"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 
15:42:12.201264 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-mg54n"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.216886 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3ed5-account-create-update-c4v8k"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.228982 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-cp69g"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.239880 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-bde5-account-create-update-jtrwz"] Dec 09 15:42:12 crc kubenswrapper[4716]: I1209 15:42:12.255130 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-cp69g"] Dec 09 15:42:13 crc kubenswrapper[4716]: I1209 15:42:13.227727 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3533a208-b517-4082-9d3b-17baddccedfd" path="/var/lib/kubelet/pods/3533a208-b517-4082-9d3b-17baddccedfd/volumes" Dec 09 15:42:13 crc kubenswrapper[4716]: I1209 15:42:13.229207 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="456491de-2a30-4a09-8a30-de5a3c8ef790" path="/var/lib/kubelet/pods/456491de-2a30-4a09-8a30-de5a3c8ef790/volumes" Dec 09 15:42:13 crc kubenswrapper[4716]: I1209 15:42:13.230088 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75caa54f-bdef-4ae6-ba85-94c61ddc84f0" path="/var/lib/kubelet/pods/75caa54f-bdef-4ae6-ba85-94c61ddc84f0/volumes" Dec 09 15:42:13 crc kubenswrapper[4716]: I1209 15:42:13.232755 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="812fdc3c-a04b-4074-9c33-2e4e2b82496d" path="/var/lib/kubelet/pods/812fdc3c-a04b-4074-9c33-2e4e2b82496d/volumes" Dec 09 15:42:13 crc kubenswrapper[4716]: I1209 15:42:13.233392 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="958ef966-3a33-491c-98fc-b13f19437e00" path="/var/lib/kubelet/pods/958ef966-3a33-491c-98fc-b13f19437e00/volumes" Dec 09 15:42:13 crc kubenswrapper[4716]: I1209 15:42:13.234118 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eff29269-58ab-4659-82bb-6bef2fd7d5d1" path="/var/lib/kubelet/pods/eff29269-58ab-4659-82bb-6bef2fd7d5d1/volumes" Dec 09 15:42:14 crc kubenswrapper[4716]: E1209 15:42:14.216531 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:42:15 crc kubenswrapper[4716]: I1209 15:42:15.030210 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-zf7ll"] Dec 09 15:42:15 crc kubenswrapper[4716]: I1209 15:42:15.041594 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-zf7ll"] Dec 09 15:42:15 crc kubenswrapper[4716]: I1209 15:42:15.234831 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf4177cf-f9cb-4e7f-94d8-bec58db07c2f" path="/var/lib/kubelet/pods/cf4177cf-f9cb-4e7f-94d8-bec58db07c2f/volumes" Dec 09 15:42:18 crc kubenswrapper[4716]: I1209 15:42:18.033697 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-pzgnj"] Dec 09 15:42:18 crc kubenswrapper[4716]: I1209 15:42:18.045950 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-pzgnj"] 
Dec 09 15:42:18 crc kubenswrapper[4716]: E1209 15:42:18.216420 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:42:19 crc kubenswrapper[4716]: I1209 15:42:19.263596 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c120b5a8-f662-4839-be5c-b9d94e80ab72" path="/var/lib/kubelet/pods/c120b5a8-f662-4839-be5c-b9d94e80ab72/volumes" Dec 09 15:42:25 crc kubenswrapper[4716]: E1209 15:42:25.216149 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:42:32 crc kubenswrapper[4716]: E1209 15:42:32.215789 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:42:38 crc kubenswrapper[4716]: E1209 15:42:38.216913 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:42:43 crc kubenswrapper[4716]: E1209 15:42:43.223371 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:42:51 crc kubenswrapper[4716]: E1209 15:42:51.217815 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:42:54 crc kubenswrapper[4716]: E1209 15:42:54.217045 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:42:58 crc kubenswrapper[4716]: I1209 15:42:58.055491 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-2rwfp"] Dec 09 15:42:58 crc kubenswrapper[4716]: I1209 15:42:58.071005 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-2rwfp"] Dec 09 15:42:59 crc kubenswrapper[4716]: I1209 15:42:59.229911 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="3aba2daa-0fe3-419f-a361-1be829c1e3d0" path="/var/lib/kubelet/pods/3aba2daa-0fe3-419f-a361-1be829c1e3d0/volumes" Dec 09 15:43:02 crc kubenswrapper[4716]: I1209 15:43:02.037190 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-d4ccd"] Dec 09 15:43:02 crc kubenswrapper[4716]: I1209 15:43:02.048126 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-d4ccd"] Dec 09 15:43:02 crc kubenswrapper[4716]: E1209 15:43:02.307656 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:43:02 crc kubenswrapper[4716]: E1209 15:43:02.307709 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:43:02 crc kubenswrapper[4716]: E1209 15:43:02.307856 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:43:02 crc kubenswrapper[4716]: E1209 15:43:02.309060 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:43:03 crc kubenswrapper[4716]: I1209 15:43:03.230440 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c59c8f4-e888-4345-8cdd-5581ef0f801c" path="/var/lib/kubelet/pods/2c59c8f4-e888-4345-8cdd-5581ef0f801c/volumes" Dec 09 15:43:09 crc kubenswrapper[4716]: I1209 15:43:09.031911 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7rkmg"] Dec 09 15:43:09 crc kubenswrapper[4716]: I1209 15:43:09.044487 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7rkmg"] Dec 09 15:43:09 crc kubenswrapper[4716]: I1209 15:43:09.251937 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff2490f6-9bf3-41a3-a2a5-48e1e7d26738" path="/var/lib/kubelet/pods/ff2490f6-9bf3-41a3-a2a5-48e1e7d26738/volumes" Dec 09 15:43:09 crc kubenswrapper[4716]: E1209 15:43:09.336436 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:43:09 crc kubenswrapper[4716]: E1209 15:43:09.336518 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:43:09 crc kubenswrapper[4716]: E1209 15:43:09.336704 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:43:09 crc kubenswrapper[4716]: E1209 15:43:09.338575 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:43:10 crc kubenswrapper[4716]: I1209 15:43:10.033018 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-64jmg"] Dec 09 15:43:10 crc kubenswrapper[4716]: I1209 15:43:10.045364 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-64jmg"] Dec 09 15:43:11 crc kubenswrapper[4716]: I1209 15:43:11.227954 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fd29da9-c781-4e6d-857c-17f84d72c639" path="/var/lib/kubelet/pods/8fd29da9-c781-4e6d-857c-17f84d72c639/volumes" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.055893 4716 scope.go:117] "RemoveContainer" containerID="d3cc0b443877252c4d440de7341385ef4db2275850d795350c3794ff6b7e01cd" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.093456 4716 scope.go:117] "RemoveContainer" containerID="5c87a39b51845cec97b5e11f1d6aefaf48ad0f3913e6761edce07cab1f310632" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.166581 4716 scope.go:117] "RemoveContainer" containerID="ec5055e85a18c1c7af53bbce65d932276d5af1b014a4baab5a0cb734967c2c57" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.216763 4716 scope.go:117] "RemoveContainer" containerID="ae8761e7087d4c45bc84a019ea59e6f13963ed8ab357ecf369467de782ebdc56" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.266810 4716 scope.go:117] "RemoveContainer" containerID="f911366deb5be34ddbd72b25d8fc89f32ff62065ea6c93a230d269e1ad867d39" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.324205 4716 scope.go:117] "RemoveContainer" containerID="7581187e2cfe62902b026e949dc19004b041c21d3ec7d4fa8841f43d63de9847" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.375056 4716 scope.go:117] "RemoveContainer" containerID="931a1a63605551cbbfa8c8b8e6265e961811a71f88c3d30b8c212b47c64f46a7" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.397412 4716 scope.go:117] "RemoveContainer" containerID="5f3d50cb4d4a5c8a8ddff54f0236d1655a75a6e86006b513bc2122241886f195" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.420418 4716 scope.go:117] "RemoveContainer" containerID="c221a3151493f2b140614c7cfe1253a17de8501008871448aed0c97c464fcecf" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.441317 4716 scope.go:117] "RemoveContainer" containerID="98e01daabc507ce7dd36c622b9d24cd953dd88a862f2bad6c8c3ada8324338a9" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.463923 4716 scope.go:117] "RemoveContainer" containerID="709509d41f485759711038d93c8054610717fcbc075bc5943916085cf24cf18c" Dec 09 15:43:12 crc kubenswrapper[4716]: I1209 15:43:12.487947 4716 scope.go:117] "RemoveContainer" containerID="a98698868ee6063344f074ce17ef56b78cebc9dffd3e4652fa4aacae3cb4e5aa" Dec 09 15:43:17 crc kubenswrapper[4716]: E1209 15:43:17.217187 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:43:21 crc kubenswrapper[4716]: E1209 15:43:21.217252 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:43:25 crc kubenswrapper[4716]: I1209 15:43:25.044641 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-sqhrf"] Dec 09 15:43:25 crc kubenswrapper[4716]: I1209 15:43:25.055240 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-sqhrf"] Dec 09 15:43:25 crc kubenswrapper[4716]: I1209 15:43:25.226186 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c31e7c21-64fd-4bb2-b165-df1743489363" path="/var/lib/kubelet/pods/c31e7c21-64fd-4bb2-b165-df1743489363/volumes" Dec 09 15:43:31 crc kubenswrapper[4716]: E1209 15:43:31.215925 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:43:33 crc kubenswrapper[4716]: E1209 15:43:33.227151 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:43:44 crc kubenswrapper[4716]: E1209 15:43:44.217848 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:43:46 crc kubenswrapper[4716]: E1209 15:43:46.214998 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:43:56 crc kubenswrapper[4716]: E1209 15:43:56.218032 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:43:57 crc kubenswrapper[4716]: E1209 15:43:57.218466 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:44:10 crc kubenswrapper[4716]: E1209 15:44:10.217304 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:44:10 crc kubenswrapper[4716]: E1209 15:44:10.217550 4716 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:44:12 crc kubenswrapper[4716]: I1209 15:44:12.760831 4716 scope.go:117] "RemoveContainer" containerID="3618ecfbe5474f07f5131d4d0ad84bde532e88e50f0fe62e5e562bcbd33c4218" Dec 09 15:44:17 crc kubenswrapper[4716]: I1209 15:44:17.922592 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:44:17 crc kubenswrapper[4716]: I1209 15:44:17.923232 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:44:21 crc kubenswrapper[4716]: E1209 15:44:21.217352 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:44:25 crc kubenswrapper[4716]: E1209 15:44:25.215891 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:44:33 crc kubenswrapper[4716]: I1209 15:44:33.060933 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-448gp"] Dec 09 15:44:33 crc kubenswrapper[4716]: I1209 15:44:33.076313 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-448gp"] Dec 09 15:44:33 crc kubenswrapper[4716]: I1209 15:44:33.251155 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34faeba3-0fa7-439c-888d-98bc7ec01369" path="/var/lib/kubelet/pods/34faeba3-0fa7-439c-888d-98bc7ec01369/volumes" Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.037943 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7a56-account-create-update-shpjz"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.086426 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-7j5l9"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.100488 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-e5db-account-create-update-gk6pn"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.111021 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-6jgz9"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.120939 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-30c9-account-create-update-vtv2g"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.131569 4716 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-30c9-account-create-update-vtv2g"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.143240 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-7j5l9"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.154198 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7a56-account-create-update-shpjz"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.163510 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-e5db-account-create-update-gk6pn"] Dec 09 15:44:34 crc kubenswrapper[4716]: I1209 15:44:34.172741 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-6jgz9"] Dec 09 15:44:35 crc kubenswrapper[4716]: I1209 15:44:35.230923 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ed85f3c-c70d-4400-8e17-28f12f0a4dbb" path="/var/lib/kubelet/pods/2ed85f3c-c70d-4400-8e17-28f12f0a4dbb/volumes" Dec 09 15:44:35 crc kubenswrapper[4716]: I1209 15:44:35.231713 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35c52dde-aad6-413a-9d1f-5bd7078372b4" path="/var/lib/kubelet/pods/35c52dde-aad6-413a-9d1f-5bd7078372b4/volumes" Dec 09 15:44:35 crc kubenswrapper[4716]: I1209 15:44:35.232329 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e5ab44d-6c6f-46d2-829d-a3699d131e2d" path="/var/lib/kubelet/pods/3e5ab44d-6c6f-46d2-829d-a3699d131e2d/volumes" Dec 09 15:44:35 crc kubenswrapper[4716]: I1209 15:44:35.233067 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0262168-a0cf-42c8-9ec5-cdb237c71db3" path="/var/lib/kubelet/pods/a0262168-a0cf-42c8-9ec5-cdb237c71db3/volumes" Dec 09 15:44:35 crc kubenswrapper[4716]: I1209 15:44:35.234306 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed21be51-d0c7-4c2b-ac68-c22f8664dea8" path="/var/lib/kubelet/pods/ed21be51-d0c7-4c2b-ac68-c22f8664dea8/volumes" Dec 09 15:44:36 crc kubenswrapper[4716]: E1209 15:44:36.219005 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:44:36 crc kubenswrapper[4716]: E1209 15:44:36.219922 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:44:47 crc kubenswrapper[4716]: I1209 15:44:47.922683 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:44:47 crc kubenswrapper[4716]: I1209 15:44:47.923241 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" Dec 09 15:44:49 crc kubenswrapper[4716]: E1209 15:44:49.217107 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:44:51 crc kubenswrapper[4716]: E1209 15:44:51.215952 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.174912 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr"] Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.177851 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.180071 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.180556 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.188611 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr"] Dec 09 15:45:00 crc kubenswrapper[4716]: E1209 15:45:00.240813 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.354767 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d577797-357f-4847-9841-6469587f7285-config-volume\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.354977 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d577797-357f-4847-9841-6469587f7285-secret-volume\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.355046 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv4rx\" (UniqueName: \"kubernetes.io/projected/8d577797-357f-4847-9841-6469587f7285-kube-api-access-jv4rx\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.457054 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d577797-357f-4847-9841-6469587f7285-secret-volume\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.457135 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv4rx\" (UniqueName: \"kubernetes.io/projected/8d577797-357f-4847-9841-6469587f7285-kube-api-access-jv4rx\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.457239 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d577797-357f-4847-9841-6469587f7285-config-volume\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.458214 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d577797-357f-4847-9841-6469587f7285-config-volume\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.474581 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d577797-357f-4847-9841-6469587f7285-secret-volume\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.474787 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv4rx\" (UniqueName: \"kubernetes.io/projected/8d577797-357f-4847-9841-6469587f7285-kube-api-access-jv4rx\") pod \"collect-profiles-29421585-sgsfr\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.542318 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.887949 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"] Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.891215 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.900951 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"] Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.977098 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fbwp\" (UniqueName: \"kubernetes.io/projected/b9ddafd8-a737-46bc-8c82-d214067d7f0d-kube-api-access-5fbwp\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.977530 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-catalog-content\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:00 crc kubenswrapper[4716]: I1209 15:45:00.977642 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-utilities\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.061365 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr"] Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.080205 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fbwp\" (UniqueName: \"kubernetes.io/projected/b9ddafd8-a737-46bc-8c82-d214067d7f0d-kube-api-access-5fbwp\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.080295 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-catalog-content\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.080391 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-utilities\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.081108 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-utilities\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.082085 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-catalog-content\") pod \"redhat-marketplace-vp4jf\" (UID: 
\"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.105446 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fbwp\" (UniqueName: \"kubernetes.io/projected/b9ddafd8-a737-46bc-8c82-d214067d7f0d-kube-api-access-5fbwp\") pod \"redhat-marketplace-vp4jf\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") " pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.224810 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.656319 4716 generic.go:334] "Generic (PLEG): container finished" podID="8d577797-357f-4847-9841-6469587f7285" containerID="d3d92986e7f39730ec0a4896e2a7b3d24cd1cb1d95a3cc0fd85e62023c6505a9" exitCode=0 Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.656368 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" event={"ID":"8d577797-357f-4847-9841-6469587f7285","Type":"ContainerDied","Data":"d3d92986e7f39730ec0a4896e2a7b3d24cd1cb1d95a3cc0fd85e62023c6505a9"} Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.656403 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" event={"ID":"8d577797-357f-4847-9841-6469587f7285","Type":"ContainerStarted","Data":"cb578b4f265eeab8e6c89693e4134a4829a3bbb9bd8115c2e4a09c8501e9e175"} Dec 09 15:45:01 crc kubenswrapper[4716]: I1209 15:45:01.771803 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"] Dec 09 15:45:01 crc kubenswrapper[4716]: W1209 15:45:01.772514 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9ddafd8_a737_46bc_8c82_d214067d7f0d.slice/crio-32599bd575d7a8b0eee4955f9cc7e21481c5ad72d05aa58c32f99244e37a0bb9 WatchSource:0}: Error finding container 32599bd575d7a8b0eee4955f9cc7e21481c5ad72d05aa58c32f99244e37a0bb9: Status 404 returned error can't find the container with id 32599bd575d7a8b0eee4955f9cc7e21481c5ad72d05aa58c32f99244e37a0bb9 Dec 09 15:45:02 crc kubenswrapper[4716]: I1209 15:45:02.666898 4716 generic.go:334] "Generic (PLEG): container finished" podID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerID="e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2" exitCode=0 Dec 09 15:45:02 crc kubenswrapper[4716]: I1209 15:45:02.666966 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerDied","Data":"e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2"} Dec 09 15:45:02 crc kubenswrapper[4716]: I1209 15:45:02.667271 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerStarted","Data":"32599bd575d7a8b0eee4955f9cc7e21481c5ad72d05aa58c32f99244e37a0bb9"} Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.162508 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.252522 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d577797-357f-4847-9841-6469587f7285-secret-volume\") pod \"8d577797-357f-4847-9841-6469587f7285\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.254411 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jv4rx\" (UniqueName: \"kubernetes.io/projected/8d577797-357f-4847-9841-6469587f7285-kube-api-access-jv4rx\") pod \"8d577797-357f-4847-9841-6469587f7285\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.254609 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d577797-357f-4847-9841-6469587f7285-config-volume\") pod \"8d577797-357f-4847-9841-6469587f7285\" (UID: \"8d577797-357f-4847-9841-6469587f7285\") " Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.255666 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d577797-357f-4847-9841-6469587f7285-config-volume" (OuterVolumeSpecName: "config-volume") pod "8d577797-357f-4847-9841-6469587f7285" (UID: "8d577797-357f-4847-9841-6469587f7285"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.260232 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8d577797-357f-4847-9841-6469587f7285-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.263722 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d577797-357f-4847-9841-6469587f7285-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8d577797-357f-4847-9841-6469587f7285" (UID: "8d577797-357f-4847-9841-6469587f7285"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:45:03 crc kubenswrapper[4716]: E1209 15:45:03.266162 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.275469 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d577797-357f-4847-9841-6469587f7285-kube-api-access-jv4rx" (OuterVolumeSpecName: "kube-api-access-jv4rx") pod "8d577797-357f-4847-9841-6469587f7285" (UID: "8d577797-357f-4847-9841-6469587f7285"). InnerVolumeSpecName "kube-api-access-jv4rx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.362773 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8d577797-357f-4847-9841-6469587f7285-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.362817 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jv4rx\" (UniqueName: \"kubernetes.io/projected/8d577797-357f-4847-9841-6469587f7285-kube-api-access-jv4rx\") on node \"crc\" DevicePath \"\"" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.688407 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerStarted","Data":"0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4"} Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.696045 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" event={"ID":"8d577797-357f-4847-9841-6469587f7285","Type":"ContainerDied","Data":"cb578b4f265eeab8e6c89693e4134a4829a3bbb9bd8115c2e4a09c8501e9e175"} Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.696099 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb578b4f265eeab8e6c89693e4134a4829a3bbb9bd8115c2e4a09c8501e9e175" Dec 09 15:45:03 crc kubenswrapper[4716]: I1209 15:45:03.696187 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr" Dec 09 15:45:04 crc kubenswrapper[4716]: I1209 15:45:04.275112 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"] Dec 09 15:45:04 crc kubenswrapper[4716]: I1209 15:45:04.285612 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421540-z6t4d"] Dec 09 15:45:04 crc kubenswrapper[4716]: I1209 15:45:04.711462 4716 generic.go:334] "Generic (PLEG): container finished" podID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerID="0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4" exitCode=0 Dec 09 15:45:04 crc kubenswrapper[4716]: I1209 15:45:04.711605 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerDied","Data":"0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4"} Dec 09 15:45:05 crc kubenswrapper[4716]: I1209 15:45:05.228318 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e0c79e5-04c3-4242-9472-0cb67c34b499" path="/var/lib/kubelet/pods/8e0c79e5-04c3-4242-9472-0cb67c34b499/volumes" Dec 09 15:45:05 crc kubenswrapper[4716]: I1209 15:45:05.728718 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerStarted","Data":"1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992"} Dec 09 15:45:05 crc kubenswrapper[4716]: I1209 15:45:05.758616 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vp4jf" podStartSLOduration=3.211987769 podStartE2EDuration="5.758597153s" podCreationTimestamp="2025-12-09 15:45:00 +0000 UTC" 
firstStartedPulling="2025-12-09 15:45:02.668843089 +0000 UTC m=+2189.823587087" lastFinishedPulling="2025-12-09 15:45:05.215452473 +0000 UTC m=+2192.370196471" observedRunningTime="2025-12-09 15:45:05.749639097 +0000 UTC m=+2192.904383085" watchObservedRunningTime="2025-12-09 15:45:05.758597153 +0000 UTC m=+2192.913341141" Dec 09 15:45:10 crc kubenswrapper[4716]: I1209 15:45:10.040391 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"] Dec 09 15:45:10 crc kubenswrapper[4716]: I1209 15:45:10.053899 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"] Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.236539 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="361ee6cb-d557-4f46-8cf8-da9c121604e2" path="/var/lib/kubelet/pods/361ee6cb-d557-4f46-8cf8-da9c121604e2/volumes" Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.239893 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.239925 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.279842 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.855569 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vp4jf" Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.909902 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"] Dec 09 15:45:12 crc kubenswrapper[4716]: E1209 15:45:12.216539 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:45:12 crc kubenswrapper[4716]: I1209 15:45:12.861455 4716 scope.go:117] "RemoveContainer" containerID="67b0b43296ec6b50302a2bf5f02cf97bbc69733172ccc4d8160c8e022fd3f527" Dec 09 15:45:12 crc kubenswrapper[4716]: I1209 15:45:12.971996 4716 scope.go:117] "RemoveContainer" containerID="b68e0ace6fd0a5bf7ff95ee992b7aa2eed76561ada598c44b469dd9e698ead2b" Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.048764 4716 scope.go:117] "RemoveContainer" containerID="a4bd52af2627d0fdaa7912ece7f1ef16690a42bc345f1501ef0fece4bc24c8fe" Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.095602 4716 scope.go:117] "RemoveContainer" containerID="79cf87c2a5c48521ab559f03622394647a4573e3926b0ae5e46e6b0c396dfbb6" Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.143749 4716 scope.go:117] "RemoveContainer" containerID="d5ea2f329b5face113ee9b7f45d706f93184ac67550a181dd64cfe075ca17459" Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.205294 4716 scope.go:117] "RemoveContainer" containerID="b8b37f2265ddc9bdd7fee3e78d96e3fb58abd7ce772c979f4dc7853e16e9018a" Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.270430 4716 scope.go:117] "RemoveContainer" containerID="53dcab9f1a907a17398f73489b77555030ba22d774eca2807c6e6025b2f0db22" Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 
Dec 09 15:45:10 crc kubenswrapper[4716]: I1209 15:45:10.040391 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"]
Dec 09 15:45:10 crc kubenswrapper[4716]: I1209 15:45:10.053899 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-dm4wb"]
Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.236539 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="361ee6cb-d557-4f46-8cf8-da9c121604e2" path="/var/lib/kubelet/pods/361ee6cb-d557-4f46-8cf8-da9c121604e2/volumes"
Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.239893 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vp4jf"
Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.239925 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vp4jf"
Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.279842 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vp4jf"
Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.855569 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vp4jf"
Dec 09 15:45:11 crc kubenswrapper[4716]: I1209 15:45:11.909902 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"]
Dec 09 15:45:12 crc kubenswrapper[4716]: E1209 15:45:12.216539 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:45:12 crc kubenswrapper[4716]: I1209 15:45:12.861455 4716 scope.go:117] "RemoveContainer" containerID="67b0b43296ec6b50302a2bf5f02cf97bbc69733172ccc4d8160c8e022fd3f527"
Dec 09 15:45:12 crc kubenswrapper[4716]: I1209 15:45:12.971996 4716 scope.go:117] "RemoveContainer" containerID="b68e0ace6fd0a5bf7ff95ee992b7aa2eed76561ada598c44b469dd9e698ead2b"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.048764 4716 scope.go:117] "RemoveContainer" containerID="a4bd52af2627d0fdaa7912ece7f1ef16690a42bc345f1501ef0fece4bc24c8fe"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.095602 4716 scope.go:117] "RemoveContainer" containerID="79cf87c2a5c48521ab559f03622394647a4573e3926b0ae5e46e6b0c396dfbb6"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.143749 4716 scope.go:117] "RemoveContainer" containerID="d5ea2f329b5face113ee9b7f45d706f93184ac67550a181dd64cfe075ca17459"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.205294 4716 scope.go:117] "RemoveContainer" containerID="b8b37f2265ddc9bdd7fee3e78d96e3fb58abd7ce772c979f4dc7853e16e9018a"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.270430 4716 scope.go:117] "RemoveContainer" containerID="53dcab9f1a907a17398f73489b77555030ba22d774eca2807c6e6025b2f0db22"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.304702 4716 scope.go:117] "RemoveContainer" containerID="cd06bf863ee800bf49b74513ca377f9acf689ffa0e5dfe55a5264f68c1fe45ac"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.356335 4716 scope.go:117] "RemoveContainer" containerID="944e2b0ba86a1ee0740e595edc8a7f14470317f47f4d621d65711a13e2562075"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.382413 4716 scope.go:117] "RemoveContainer" containerID="e9d1a0deadf1c188849bf75839b0bbefff8b2479effa195c6f8949b4891906bf"
Dec 09 15:45:13 crc kubenswrapper[4716]: I1209 15:45:13.818999 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vp4jf" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="registry-server" containerID="cri-o://1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992" gracePeriod=2
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.328059 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp4jf"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.382954 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-catalog-content\") pod \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") "
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.383348 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-utilities\") pod \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") "
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.383479 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fbwp\" (UniqueName: \"kubernetes.io/projected/b9ddafd8-a737-46bc-8c82-d214067d7f0d-kube-api-access-5fbwp\") pod \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\" (UID: \"b9ddafd8-a737-46bc-8c82-d214067d7f0d\") "
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.386183 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-utilities" (OuterVolumeSpecName: "utilities") pod "b9ddafd8-a737-46bc-8c82-d214067d7f0d" (UID: "b9ddafd8-a737-46bc-8c82-d214067d7f0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.394919 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9ddafd8-a737-46bc-8c82-d214067d7f0d-kube-api-access-5fbwp" (OuterVolumeSpecName: "kube-api-access-5fbwp") pod "b9ddafd8-a737-46bc-8c82-d214067d7f0d" (UID: "b9ddafd8-a737-46bc-8c82-d214067d7f0d"). InnerVolumeSpecName "kube-api-access-5fbwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.418980 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b9ddafd8-a737-46bc-8c82-d214067d7f0d" (UID: "b9ddafd8-a737-46bc-8c82-d214067d7f0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.487192 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.487521 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fbwp\" (UniqueName: \"kubernetes.io/projected/b9ddafd8-a737-46bc-8c82-d214067d7f0d-kube-api-access-5fbwp\") on node \"crc\" DevicePath \"\""
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.487592 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9ddafd8-a737-46bc-8c82-d214067d7f0d-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.832097 4716 generic.go:334] "Generic (PLEG): container finished" podID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerID="1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992" exitCode=0
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.832153 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerDied","Data":"1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992"}
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.832185 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vp4jf" event={"ID":"b9ddafd8-a737-46bc-8c82-d214067d7f0d","Type":"ContainerDied","Data":"32599bd575d7a8b0eee4955f9cc7e21481c5ad72d05aa58c32f99244e37a0bb9"}
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.832190 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vp4jf"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.832205 4716 scope.go:117] "RemoveContainer" containerID="1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.859065 4716 scope.go:117] "RemoveContainer" containerID="0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.877724 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"]
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.888109 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vp4jf"]
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.906732 4716 scope.go:117] "RemoveContainer" containerID="e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.977741 4716 scope.go:117] "RemoveContainer" containerID="1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992"
Dec 09 15:45:14 crc kubenswrapper[4716]: E1209 15:45:14.978301 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992\": container with ID starting with 1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992 not found: ID does not exist" containerID="1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.978337 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992"} err="failed to get container status \"1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992\": rpc error: code = NotFound desc = could not find container \"1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992\": container with ID starting with 1218f3529a0a90fb0e0d1df1e622719b37c6ff3e10c8acd449fd81f989dff992 not found: ID does not exist"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.978364 4716 scope.go:117] "RemoveContainer" containerID="0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4"
Dec 09 15:45:14 crc kubenswrapper[4716]: E1209 15:45:14.978730 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4\": container with ID starting with 0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4 not found: ID does not exist" containerID="0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.978775 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4"} err="failed to get container status \"0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4\": rpc error: code = NotFound desc = could not find container \"0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4\": container with ID starting with 0ac3f3c131acc41f18a94bfbab84214e4d9247ce607d8ff4913808554ab370a4 not found: ID does not exist"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.978808 4716 scope.go:117] "RemoveContainer" containerID="e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2"
Dec 09 15:45:14 crc kubenswrapper[4716]: E1209 15:45:14.979087 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2\": container with ID starting with e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2 not found: ID does not exist" containerID="e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2"
Dec 09 15:45:14 crc kubenswrapper[4716]: I1209 15:45:14.979111 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2"} err="failed to get container status \"e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2\": rpc error: code = NotFound desc = could not find container \"e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2\": container with ID starting with e12b6ab0e19dd0e12b0365bb3cf3b5f69fd9615cbb136e6092bfdcfdd8b6ede2 not found: ID does not exist"
Dec 09 15:45:15 crc kubenswrapper[4716]: I1209 15:45:15.228156 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" path="/var/lib/kubelet/pods/b9ddafd8-a737-46bc-8c82-d214067d7f0d/volumes"
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.028559 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-a657-account-create-update-fgzll"]
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.051995 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-rrjbc"]
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.062883 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-a657-account-create-update-fgzll"]
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.073858 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-rrjbc"]
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.227531 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19d87406-0357-4685-9062-4c740b1e0346" path="/var/lib/kubelet/pods/19d87406-0357-4685-9062-4c740b1e0346/volumes"
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.228388 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94a3f2ac-e9a9-4974-ae3d-20fded31a2fa" path="/var/lib/kubelet/pods/94a3f2ac-e9a9-4974-ae3d-20fded31a2fa/volumes"
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.922423 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.922815 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.922878 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2"
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.923912 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7bc95cea1510600028ea0972ceb137a19bfc71882cae48ac9032c1a550a517f4"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 15:45:17 crc kubenswrapper[4716]: I1209 15:45:17.923980 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://7bc95cea1510600028ea0972ceb137a19bfc71882cae48ac9032c1a550a517f4" gracePeriod=600
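[Editor's aside] The liveness failures above are plain HTTP GETs against the health endpoint named in the output; "connection refused" counts as a failed probe, and after enough consecutive failures the kubelet kills the container ("Killing container with a grace period ... gracePeriod=600"). A minimal Go sketch of that loop; the threshold and period are illustrative, not read from this pod's spec:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        const endpoint = "http://127.0.0.1:8798/health" // URL from the probe output above
        failures := 0
        for {
            resp, err := http.Get(endpoint)
            if err != nil {
                // "dial tcp 127.0.0.1:8798: connect: connection refused" lands here.
                failures++
                fmt.Println("probe failure:", err)
            } else {
                resp.Body.Close()
                failures = 0 // any response resets the count (kubelet also checks the status code)
            }
            if failures >= 3 { // illustrative failureThreshold
                fmt.Println("liveness failed; kubelet would kill and restart the container")
                return
            }
            time.Sleep(10 * time.Second) // illustrative periodSeconds
        }
    }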
pods=["openstack/nova-cell0-cell-mapping-2zp9h"] Dec 09 15:45:35 crc kubenswrapper[4716]: I1209 15:45:35.045637 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-2zp9h"] Dec 09 15:45:35 crc kubenswrapper[4716]: I1209 15:45:35.228440 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a66dc81-048f-4d25-b04f-16e8063d92b7" path="/var/lib/kubelet/pods/2a66dc81-048f-4d25-b04f-16e8063d92b7/volumes" Dec 09 15:45:35 crc kubenswrapper[4716]: I1209 15:45:35.229128 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f95389e-c579-4294-b7a5-f22c93e45d85" path="/var/lib/kubelet/pods/8f95389e-c579-4294-b7a5-f22c93e45d85/volumes" Dec 09 15:45:37 crc kubenswrapper[4716]: I1209 15:45:37.030933 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-lzlrv"] Dec 09 15:45:37 crc kubenswrapper[4716]: I1209 15:45:37.042275 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-lzlrv"] Dec 09 15:45:37 crc kubenswrapper[4716]: I1209 15:45:37.226476 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="560906c8-b759-4faa-9a29-86f3c8f92cdb" path="/var/lib/kubelet/pods/560906c8-b759-4faa-9a29-86f3c8f92cdb/volumes" Dec 09 15:45:39 crc kubenswrapper[4716]: E1209 15:45:39.220237 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:45:43 crc kubenswrapper[4716]: E1209 15:45:43.216483 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:45:50 crc kubenswrapper[4716]: E1209 15:45:50.216581 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:45:56 crc kubenswrapper[4716]: E1209 15:45:56.216594 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:46:03 crc kubenswrapper[4716]: E1209 15:46:03.236113 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:46:09 crc kubenswrapper[4716]: E1209 15:46:09.216534 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:46:13 crc kubenswrapper[4716]: I1209 15:46:13.649958 4716 scope.go:117] "RemoveContainer" containerID="cfcb48d717c055735f3bb3c3703dbbe7893fd20f3b0142619791442b77f53f52" Dec 09 15:46:13 crc kubenswrapper[4716]: I1209 15:46:13.685849 4716 scope.go:117] "RemoveContainer" containerID="535a83fbc937decb13fd847adb3622678ff80d559b2a5532df02a057b7f164ad" Dec 09 15:46:13 crc kubenswrapper[4716]: I1209 15:46:13.752134 4716 scope.go:117] "RemoveContainer" containerID="d330b664f373e2ad2b15415e7edc2cba5711eed0e0a1b40b85e0d50c5e2fb448" Dec 09 15:46:13 crc kubenswrapper[4716]: I1209 15:46:13.838819 4716 scope.go:117] "RemoveContainer" containerID="589c9999c73663a2a056f75818b84ace2af485ba6208cdb6bba3d61f6b137093" Dec 09 15:46:13 crc kubenswrapper[4716]: I1209 15:46:13.901801 4716 scope.go:117] "RemoveContainer" containerID="c3abb4fb2138e66cb40c0a723ec1f855503c1bb724d38f87c66fab463f516995" Dec 09 15:46:13 crc kubenswrapper[4716]: I1209 15:46:13.936140 4716 scope.go:117] "RemoveContainer" containerID="297b248ce8a37da2b619eddbacd46e33937aff3069e0b86332f34f04a75c9a3f" Dec 09 15:46:14 crc kubenswrapper[4716]: E1209 15:46:14.216542 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:46:22 crc kubenswrapper[4716]: E1209 15:46:22.215810 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:46:24 crc kubenswrapper[4716]: I1209 15:46:24.045499 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-lnp7x"] Dec 09 15:46:24 crc kubenswrapper[4716]: I1209 15:46:24.061764 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-lnp7x"] Dec 09 15:46:25 crc kubenswrapper[4716]: I1209 15:46:25.226223 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b24845fa-f1d0-405e-b329-674efd15b3c6" path="/var/lib/kubelet/pods/b24845fa-f1d0-405e-b329-674efd15b3c6/volumes" Dec 09 15:46:28 crc kubenswrapper[4716]: E1209 15:46:28.240831 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:46:34 crc kubenswrapper[4716]: E1209 15:46:34.216135 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:46:42 crc kubenswrapper[4716]: E1209 15:46:42.221909 4716 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:46:46 crc kubenswrapper[4716]: E1209 15:46:46.216864 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:46:53 crc kubenswrapper[4716]: E1209 15:46:53.233396 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:46:59 crc kubenswrapper[4716]: E1209 15:46:59.217280 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:47:07 crc kubenswrapper[4716]: E1209 15:47:07.218198 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:47:12 crc kubenswrapper[4716]: E1209 15:47:12.216524 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:47:14 crc kubenswrapper[4716]: I1209 15:47:14.106800 4716 scope.go:117] "RemoveContainer" containerID="d7698082c1bf432d676d295fd843194f0662f3667b8231750cf887d93598a62c" Dec 09 15:47:21 crc kubenswrapper[4716]: E1209 15:47:21.219358 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:47:25 crc kubenswrapper[4716]: E1209 15:47:25.217038 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:47:35 crc kubenswrapper[4716]: E1209 15:47:35.216535 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:47:40 crc kubenswrapper[4716]: E1209 15:47:40.216557 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:47:47 crc kubenswrapper[4716]: I1209 15:47:47.922146 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:47:47 crc kubenswrapper[4716]: I1209 15:47:47.923913 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:47:49 crc kubenswrapper[4716]: E1209 15:47:49.216210 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:47:53 crc kubenswrapper[4716]: E1209 15:47:53.232772 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:48:02 crc kubenswrapper[4716]: E1209 15:48:02.218084 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:48:05 crc kubenswrapper[4716]: I1209 15:48:05.217633 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:48:05 crc kubenswrapper[4716]: E1209 15:48:05.352802 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:48:05 crc kubenswrapper[4716]: E1209 15:48:05.352877 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:48:05 crc kubenswrapper[4716]: E1209 15:48:05.353027 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 09 15:48:05 crc kubenswrapper[4716]: E1209 15:48:05.354264 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.337000 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.337912 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.338110 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
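[Editor's aside] The root cause of the long ImagePullBackOff runs finally surfaces here: the current-tested tag was deleted on the registry (Quay's "time machine" message), so every retry is doomed until the tag is re-pushed or revived. One way to confirm that from outside the cluster is to list the repository's tags over the Docker Registry HTTP API v2; a minimal Go sketch, assuming the repository allows anonymous reads (otherwise a bearer token is required):

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        // Repository and tag taken from the failing pull above.
        const url = "https://quay.rdoproject.org/v2/podified-master-centos10/openstack-heat-engine/tags/list"
        const want = "current-tested"

        resp, err := http.Get(url)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var body struct {
            Name string   `json:"name"`
            Tags []string `json:"tags"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
            panic(err)
        }
        for _, tag := range body.Tags {
            if tag == want {
                fmt.Println("tag exists; the failure was transient")
                return
            }
        }
        fmt.Println("tag is gone; pulls will keep failing until it is re-pushed or revived")
    }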
Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.337000 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.337912 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.338110 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 15:48:16 crc kubenswrapper[4716]: E1209 15:48:16.339361 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:48:17 crc kubenswrapper[4716]: I1209 15:48:17.921779 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:48:17 crc kubenswrapper[4716]: I1209 15:48:17.922149 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:48:20 crc kubenswrapper[4716]: E1209 15:48:20.216476 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:48:30 crc kubenswrapper[4716]: E1209 15:48:30.217685 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:48:31 crc kubenswrapper[4716]: E1209 15:48:31.216877 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:48:42 crc kubenswrapper[4716]: E1209 15:48:42.216808 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:48:45 crc kubenswrapper[4716]: E1209 15:48:45.218465 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:48:47 crc kubenswrapper[4716]: I1209 15:48:47.926505 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:48:47 crc kubenswrapper[4716]: I1209 15:48:47.927149 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:48:47 crc kubenswrapper[4716]: I1209 15:48:47.927211 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2"
Dec 09 15:48:47 crc kubenswrapper[4716]: I1209 15:48:47.928407 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 15:48:47 crc kubenswrapper[4716]: I1209 15:48:47.928473 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" gracePeriod=600
Dec 09 15:48:48 crc kubenswrapper[4716]: E1209 15:48:48.076159 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 15:48:48 crc kubenswrapper[4716]: I1209 15:48:48.397558 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" exitCode=0
Dec 09 15:48:48 crc kubenswrapper[4716]: I1209 15:48:48.397988 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"}
Dec 09 15:48:48 crc kubenswrapper[4716]: I1209 15:48:48.398032 4716 scope.go:117] "RemoveContainer" containerID="7bc95cea1510600028ea0972ceb137a19bfc71882cae48ac9032c1a550a517f4"
Dec 09 15:48:48 crc kubenswrapper[4716]: I1209 15:48:48.398916 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"
Dec 09 15:48:48 crc kubenswrapper[4716]: E1209 15:48:48.399223 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 15:48:56 crc kubenswrapper[4716]: E1209 15:48:56.215569 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:48:57 crc kubenswrapper[4716]: E1209 15:48:57.217237 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:49:01 crc kubenswrapper[4716]: I1209 15:49:01.214562 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"
Dec 09 15:49:01 crc kubenswrapper[4716]: E1209 15:49:01.215588 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.147836 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.286673 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-inventory\") pod \"56391e5b-15d4-4f61-9d57-6512b28a5254\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.286920 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkc5s\" (UniqueName: \"kubernetes.io/projected/56391e5b-15d4-4f61-9d57-6512b28a5254-kube-api-access-lkc5s\") pod \"56391e5b-15d4-4f61-9d57-6512b28a5254\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.286985 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-ssh-key\") pod \"56391e5b-15d4-4f61-9d57-6512b28a5254\" (UID: \"56391e5b-15d4-4f61-9d57-6512b28a5254\") " Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.293569 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56391e5b-15d4-4f61-9d57-6512b28a5254-kube-api-access-lkc5s" (OuterVolumeSpecName: "kube-api-access-lkc5s") pod "56391e5b-15d4-4f61-9d57-6512b28a5254" (UID: "56391e5b-15d4-4f61-9d57-6512b28a5254"). InnerVolumeSpecName "kube-api-access-lkc5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.320077 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-inventory" (OuterVolumeSpecName: "inventory") pod "56391e5b-15d4-4f61-9d57-6512b28a5254" (UID: "56391e5b-15d4-4f61-9d57-6512b28a5254"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.320450 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "56391e5b-15d4-4f61-9d57-6512b28a5254" (UID: "56391e5b-15d4-4f61-9d57-6512b28a5254"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.396953 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.397024 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56391e5b-15d4-4f61-9d57-6512b28a5254-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.397038 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkc5s\" (UniqueName: \"kubernetes.io/projected/56391e5b-15d4-4f61-9d57-6512b28a5254-kube-api-access-lkc5s\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.683833 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" event={"ID":"56391e5b-15d4-4f61-9d57-6512b28a5254","Type":"ContainerDied","Data":"603461ca11ff1f8948de5a56b8920be8fa23f8d75403e5c6510854fd878cd208"} Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.683880 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="603461ca11ff1f8948de5a56b8920be8fa23f8d75403e5c6510854fd878cd208" Dec 09 15:49:09 crc kubenswrapper[4716]: I1209 15:49:09.683964 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-b8znx" Dec 09 15:49:10 crc kubenswrapper[4716]: E1209 15:49:10.217236 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.681212 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wbvhl"] Dec 09 15:49:12 crc kubenswrapper[4716]: E1209 15:49:12.682122 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="extract-content" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682144 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="extract-content" Dec 09 15:49:12 crc kubenswrapper[4716]: E1209 15:49:12.682186 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56391e5b-15d4-4f61-9d57-6512b28a5254" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682198 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="56391e5b-15d4-4f61-9d57-6512b28a5254" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 15:49:12 crc kubenswrapper[4716]: E1209 15:49:12.682221 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="registry-server" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682230 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="registry-server" Dec 09 15:49:12 crc kubenswrapper[4716]: E1209 15:49:12.682263 4716 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="extract-utilities" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682271 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="extract-utilities" Dec 09 15:49:12 crc kubenswrapper[4716]: E1209 15:49:12.682300 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d577797-357f-4847-9841-6469587f7285" containerName="collect-profiles" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682308 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d577797-357f-4847-9841-6469587f7285" containerName="collect-profiles" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682578 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9ddafd8-a737-46bc-8c82-d214067d7f0d" containerName="registry-server" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682642 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="56391e5b-15d4-4f61-9d57-6512b28a5254" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.682667 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d577797-357f-4847-9841-6469587f7285" containerName="collect-profiles" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.684569 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.693986 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wbvhl"] Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.819177 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25cjc\" (UniqueName: \"kubernetes.io/projected/4c46b892-915c-46a6-97ee-6efecb5c5b9e-kube-api-access-25cjc\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.819346 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-utilities\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.819838 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-catalog-content\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.922468 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-catalog-content\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.922854 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25cjc\" (UniqueName: 
\"kubernetes.io/projected/4c46b892-915c-46a6-97ee-6efecb5c5b9e-kube-api-access-25cjc\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.922998 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-catalog-content\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.923021 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-utilities\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.923450 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-utilities\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:12 crc kubenswrapper[4716]: I1209 15:49:12.942953 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25cjc\" (UniqueName: \"kubernetes.io/projected/4c46b892-915c-46a6-97ee-6efecb5c5b9e-kube-api-access-25cjc\") pod \"redhat-operators-wbvhl\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:13 crc kubenswrapper[4716]: I1209 15:49:13.023530 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:13 crc kubenswrapper[4716]: I1209 15:49:13.555748 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wbvhl"] Dec 09 15:49:13 crc kubenswrapper[4716]: W1209 15:49:13.556050 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c46b892_915c_46a6_97ee_6efecb5c5b9e.slice/crio-8717033ad036c05a8129887bc5b10860cb271fe080f1dce45c7c4bc2b2eb44e0 WatchSource:0}: Error finding container 8717033ad036c05a8129887bc5b10860cb271fe080f1dce45c7c4bc2b2eb44e0: Status 404 returned error can't find the container with id 8717033ad036c05a8129887bc5b10860cb271fe080f1dce45c7c4bc2b2eb44e0 Dec 09 15:49:13 crc kubenswrapper[4716]: I1209 15:49:13.737447 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerStarted","Data":"8717033ad036c05a8129887bc5b10860cb271fe080f1dce45c7c4bc2b2eb44e0"} Dec 09 15:49:14 crc kubenswrapper[4716]: I1209 15:49:14.747758 4716 generic.go:334] "Generic (PLEG): container finished" podID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerID="d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4" exitCode=0 Dec 09 15:49:14 crc kubenswrapper[4716]: I1209 15:49:14.747806 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerDied","Data":"d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4"} Dec 09 15:49:15 crc kubenswrapper[4716]: I1209 15:49:15.759114 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerStarted","Data":"eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711"} Dec 09 15:49:16 crc kubenswrapper[4716]: I1209 15:49:16.214355 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:49:16 crc kubenswrapper[4716]: E1209 15:49:16.214916 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.030765 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4"] Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.032866 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.035257 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.036687 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.037482 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.041304 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.051375 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4"] Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.130505 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.130586 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkq9f\" (UniqueName: \"kubernetes.io/projected/7a560de6-cdd4-41b4-af8c-0523cca3eed0-kube-api-access-kkq9f\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.130728 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.233570 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.233700 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkq9f\" (UniqueName: \"kubernetes.io/projected/7a560de6-cdd4-41b4-af8c-0523cca3eed0-kube-api-access-kkq9f\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.233857 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.241768 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.241844 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.299404 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkq9f\" (UniqueName: \"kubernetes.io/projected/7a560de6-cdd4-41b4-af8c-0523cca3eed0-kube-api-access-kkq9f\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:17 crc kubenswrapper[4716]: I1209 15:49:17.399274 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:49:18 crc kubenswrapper[4716]: W1209 15:49:18.198348 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a560de6_cdd4_41b4_af8c_0523cca3eed0.slice/crio-bdf6000e85838b7163c7f5e3c24a2b4c3cbe005650077ab484dfa040a5f4368a WatchSource:0}: Error finding container bdf6000e85838b7163c7f5e3c24a2b4c3cbe005650077ab484dfa040a5f4368a: Status 404 returned error can't find the container with id bdf6000e85838b7163c7f5e3c24a2b4c3cbe005650077ab484dfa040a5f4368a Dec 09 15:49:18 crc kubenswrapper[4716]: I1209 15:49:18.214350 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4"] Dec 09 15:49:18 crc kubenswrapper[4716]: I1209 15:49:18.807375 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" event={"ID":"7a560de6-cdd4-41b4-af8c-0523cca3eed0","Type":"ContainerStarted","Data":"bdf6000e85838b7163c7f5e3c24a2b4c3cbe005650077ab484dfa040a5f4368a"} Dec 09 15:49:19 crc kubenswrapper[4716]: I1209 15:49:19.824302 4716 generic.go:334] "Generic (PLEG): container finished" podID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerID="eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711" exitCode=0 Dec 09 15:49:19 crc kubenswrapper[4716]: I1209 15:49:19.824378 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerDied","Data":"eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711"} Dec 09 15:49:19 crc kubenswrapper[4716]: I1209 15:49:19.826661 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" 
event={"ID":"7a560de6-cdd4-41b4-af8c-0523cca3eed0","Type":"ContainerStarted","Data":"45ee347511c22be7e2723fb43329dbf963649b6554d1cfa6ac848047bcd8ec7e"} Dec 09 15:49:19 crc kubenswrapper[4716]: I1209 15:49:19.858862 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" podStartSLOduration=1.759214769 podStartE2EDuration="2.858839978s" podCreationTimestamp="2025-12-09 15:49:17 +0000 UTC" firstStartedPulling="2025-12-09 15:49:18.201106213 +0000 UTC m=+2445.355850201" lastFinishedPulling="2025-12-09 15:49:19.300731422 +0000 UTC m=+2446.455475410" observedRunningTime="2025-12-09 15:49:19.857132679 +0000 UTC m=+2447.011876667" watchObservedRunningTime="2025-12-09 15:49:19.858839978 +0000 UTC m=+2447.013583966" Dec 09 15:49:20 crc kubenswrapper[4716]: I1209 15:49:20.840064 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerStarted","Data":"0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22"} Dec 09 15:49:20 crc kubenswrapper[4716]: I1209 15:49:20.868852 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wbvhl" podStartSLOduration=3.321301055 podStartE2EDuration="8.86882022s" podCreationTimestamp="2025-12-09 15:49:12 +0000 UTC" firstStartedPulling="2025-12-09 15:49:14.7499645 +0000 UTC m=+2441.904708488" lastFinishedPulling="2025-12-09 15:49:20.297483665 +0000 UTC m=+2447.452227653" observedRunningTime="2025-12-09 15:49:20.856349513 +0000 UTC m=+2448.011093501" watchObservedRunningTime="2025-12-09 15:49:20.86882022 +0000 UTC m=+2448.023564218" Dec 09 15:49:21 crc kubenswrapper[4716]: E1209 15:49:21.216013 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.024478 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.024821 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:23 crc kubenswrapper[4716]: E1209 15:49:23.224834 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.497110 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.500293 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.509929 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.621414 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxcnm\" (UniqueName: \"kubernetes.io/projected/ecb1340b-873c-4203-8dcd-4b9fc2264d00-kube-api-access-zxcnm\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.621566 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-catalog-content\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.621680 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-utilities\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.724433 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxcnm\" (UniqueName: \"kubernetes.io/projected/ecb1340b-873c-4203-8dcd-4b9fc2264d00-kube-api-access-zxcnm\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.724550 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-catalog-content\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.724694 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-utilities\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.725240 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-utilities\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.725324 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-catalog-content\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.748640 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zxcnm\" (UniqueName: \"kubernetes.io/projected/ecb1340b-873c-4203-8dcd-4b9fc2264d00-kube-api-access-zxcnm\") pod \"certified-operators-l22s4\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:23 crc kubenswrapper[4716]: I1209 15:49:23.830708 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:24 crc kubenswrapper[4716]: I1209 15:49:24.088793 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wbvhl" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="registry-server" probeResult="failure" output=< Dec 09 15:49:24 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 15:49:24 crc kubenswrapper[4716]: > Dec 09 15:49:24 crc kubenswrapper[4716]: I1209 15:49:24.420347 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 15:49:24 crc kubenswrapper[4716]: W1209 15:49:24.438408 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podecb1340b_873c_4203_8dcd_4b9fc2264d00.slice/crio-afbddd0eb274610a910e307cfe44fec183ab95cea824908cb27daf64e8883b29 WatchSource:0}: Error finding container afbddd0eb274610a910e307cfe44fec183ab95cea824908cb27daf64e8883b29: Status 404 returned error can't find the container with id afbddd0eb274610a910e307cfe44fec183ab95cea824908cb27daf64e8883b29 Dec 09 15:49:24 crc kubenswrapper[4716]: I1209 15:49:24.901926 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerStarted","Data":"afbddd0eb274610a910e307cfe44fec183ab95cea824908cb27daf64e8883b29"} Dec 09 15:49:25 crc kubenswrapper[4716]: I1209 15:49:25.914318 4716 generic.go:334] "Generic (PLEG): container finished" podID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerID="d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67" exitCode=0 Dec 09 15:49:25 crc kubenswrapper[4716]: I1209 15:49:25.914370 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerDied","Data":"d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67"} Dec 09 15:49:31 crc kubenswrapper[4716]: I1209 15:49:31.214968 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:49:31 crc kubenswrapper[4716]: E1209 15:49:31.215893 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:49:33 crc kubenswrapper[4716]: I1209 15:49:33.198313 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerStarted","Data":"24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de"} Dec 09 15:49:33 crc 
kubenswrapper[4716]: I1209 15:49:33.297127 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:33 crc kubenswrapper[4716]: I1209 15:49:33.357320 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:34 crc kubenswrapper[4716]: I1209 15:49:34.482853 4716 generic.go:334] "Generic (PLEG): container finished" podID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerID="24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de" exitCode=0 Dec 09 15:49:34 crc kubenswrapper[4716]: I1209 15:49:34.483688 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerDied","Data":"24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de"} Dec 09 15:49:34 crc kubenswrapper[4716]: I1209 15:49:34.500033 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wbvhl"] Dec 09 15:49:35 crc kubenswrapper[4716]: E1209 15:49:35.216143 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:49:35 crc kubenswrapper[4716]: I1209 15:49:35.647464 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wbvhl" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="registry-server" containerID="cri-o://0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22" gracePeriod=2 Dec 09 15:49:35 crc kubenswrapper[4716]: I1209 15:49:35.647759 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerStarted","Data":"00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390"} Dec 09 15:49:35 crc kubenswrapper[4716]: I1209 15:49:35.668232 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l22s4" podStartSLOduration=3.498826246 podStartE2EDuration="12.668192591s" podCreationTimestamp="2025-12-09 15:49:23 +0000 UTC" firstStartedPulling="2025-12-09 15:49:25.917907766 +0000 UTC m=+2453.072651754" lastFinishedPulling="2025-12-09 15:49:35.087274111 +0000 UTC m=+2462.242018099" observedRunningTime="2025-12-09 15:49:35.666676337 +0000 UTC m=+2462.821420335" watchObservedRunningTime="2025-12-09 15:49:35.668192591 +0000 UTC m=+2462.822936579" Dec 09 15:49:36 crc kubenswrapper[4716]: E1209 15:49:36.219161 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.407651 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.446353 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25cjc\" (UniqueName: \"kubernetes.io/projected/4c46b892-915c-46a6-97ee-6efecb5c5b9e-kube-api-access-25cjc\") pod \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.446402 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-utilities\") pod \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.446608 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-catalog-content\") pod \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\" (UID: \"4c46b892-915c-46a6-97ee-6efecb5c5b9e\") " Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.447716 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-utilities" (OuterVolumeSpecName: "utilities") pod "4c46b892-915c-46a6-97ee-6efecb5c5b9e" (UID: "4c46b892-915c-46a6-97ee-6efecb5c5b9e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.475983 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c46b892-915c-46a6-97ee-6efecb5c5b9e-kube-api-access-25cjc" (OuterVolumeSpecName: "kube-api-access-25cjc") pod "4c46b892-915c-46a6-97ee-6efecb5c5b9e" (UID: "4c46b892-915c-46a6-97ee-6efecb5c5b9e"). InnerVolumeSpecName "kube-api-access-25cjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.869858 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25cjc\" (UniqueName: \"kubernetes.io/projected/4c46b892-915c-46a6-97ee-6efecb5c5b9e-kube-api-access-25cjc\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.869959 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.907103 4716 generic.go:334] "Generic (PLEG): container finished" podID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerID="0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22" exitCode=0 Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.908187 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wbvhl" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.908128 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerDied","Data":"0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22"} Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.908914 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbvhl" event={"ID":"4c46b892-915c-46a6-97ee-6efecb5c5b9e","Type":"ContainerDied","Data":"8717033ad036c05a8129887bc5b10860cb271fe080f1dce45c7c4bc2b2eb44e0"} Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.908959 4716 scope.go:117] "RemoveContainer" containerID="0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.948765 4716 scope.go:117] "RemoveContainer" containerID="eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.958057 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c46b892-915c-46a6-97ee-6efecb5c5b9e" (UID: "4c46b892-915c-46a6-97ee-6efecb5c5b9e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.981500 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c46b892-915c-46a6-97ee-6efecb5c5b9e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:36 crc kubenswrapper[4716]: I1209 15:49:36.983449 4716 scope.go:117] "RemoveContainer" containerID="d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.047909 4716 scope.go:117] "RemoveContainer" containerID="0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22" Dec 09 15:49:37 crc kubenswrapper[4716]: E1209 15:49:37.054306 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22\": container with ID starting with 0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22 not found: ID does not exist" containerID="0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.054374 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22"} err="failed to get container status \"0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22\": rpc error: code = NotFound desc = could not find container \"0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22\": container with ID starting with 0ab5bef8016e6044116ceb78c7d47b2304d63d6b148c1b8c96b66024d65ead22 not found: ID does not exist" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.054409 4716 scope.go:117] "RemoveContainer" containerID="eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711" Dec 09 15:49:37 crc kubenswrapper[4716]: E1209 15:49:37.054956 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711\": container with ID starting with eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711 not found: ID does not exist" containerID="eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.054992 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711"} err="failed to get container status \"eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711\": rpc error: code = NotFound desc = could not find container \"eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711\": container with ID starting with eb3a16cea102e28e74b6e341b0a53bfafc2ddeae20d24eb2b6382503ccba4711 not found: ID does not exist" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.055006 4716 scope.go:117] "RemoveContainer" containerID="d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4" Dec 09 15:49:37 crc kubenswrapper[4716]: E1209 15:49:37.055637 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4\": container with ID starting with d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4 not found: ID does not exist" containerID="d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.055684 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4"} err="failed to get container status \"d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4\": rpc error: code = NotFound desc = could not find container \"d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4\": container with ID starting with d831275479e69ddbe840b8e6c38bac1236818622bf800977b601c2391d233ba4 not found: ID does not exist" Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.311888 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wbvhl"] Dec 09 15:49:37 crc kubenswrapper[4716]: I1209 15:49:37.332793 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wbvhl"] Dec 09 15:49:39 crc kubenswrapper[4716]: I1209 15:49:39.332566 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" path="/var/lib/kubelet/pods/4c46b892-915c-46a6-97ee-6efecb5c5b9e/volumes" Dec 09 15:49:42 crc kubenswrapper[4716]: I1209 15:49:42.213113 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:49:42 crc kubenswrapper[4716]: E1209 15:49:42.213818 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:49:43 crc kubenswrapper[4716]: I1209 15:49:43.831767 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:43 crc kubenswrapper[4716]: I1209 15:49:43.832159 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:43 crc kubenswrapper[4716]: I1209 15:49:43.933068 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.033523 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.106010 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.200596 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zxsvb"] Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.200990 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zxsvb" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="registry-server" containerID="cri-o://aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4" gracePeriod=2 Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.783855 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxsvb" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.832390 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4qrf\" (UniqueName: \"kubernetes.io/projected/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-kube-api-access-r4qrf\") pod \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.833710 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-utilities\") pod \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.834076 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-catalog-content\") pod \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\" (UID: \"ef94fdf4-dd68-4fe3-b935-b68c34d7814d\") " Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.834291 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-utilities" (OuterVolumeSpecName: "utilities") pod "ef94fdf4-dd68-4fe3-b935-b68c34d7814d" (UID: "ef94fdf4-dd68-4fe3-b935-b68c34d7814d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.835295 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.848840 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-kube-api-access-r4qrf" (OuterVolumeSpecName: "kube-api-access-r4qrf") pod "ef94fdf4-dd68-4fe3-b935-b68c34d7814d" (UID: "ef94fdf4-dd68-4fe3-b935-b68c34d7814d"). InnerVolumeSpecName "kube-api-access-r4qrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.904860 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ef94fdf4-dd68-4fe3-b935-b68c34d7814d" (UID: "ef94fdf4-dd68-4fe3-b935-b68c34d7814d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.937529 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:44 crc kubenswrapper[4716]: I1209 15:49:44.937568 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4qrf\" (UniqueName: \"kubernetes.io/projected/ef94fdf4-dd68-4fe3-b935-b68c34d7814d-kube-api-access-r4qrf\") on node \"crc\" DevicePath \"\"" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.367810 4716 generic.go:334] "Generic (PLEG): container finished" podID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerID="aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4" exitCode=0 Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.368534 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zxsvb" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.604799 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxsvb" event={"ID":"ef94fdf4-dd68-4fe3-b935-b68c34d7814d","Type":"ContainerDied","Data":"aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4"} Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.604857 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxsvb" event={"ID":"ef94fdf4-dd68-4fe3-b935-b68c34d7814d","Type":"ContainerDied","Data":"014e28478f3d888dcdff9e05108ec36e5ef2eb1051a5aa1d291ced24cfb9568c"} Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.604885 4716 scope.go:117] "RemoveContainer" containerID="aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.638515 4716 scope.go:117] "RemoveContainer" containerID="f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.654297 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zxsvb"] Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.677308 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zxsvb"] Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.678320 4716 scope.go:117] "RemoveContainer" containerID="c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.737393 4716 scope.go:117] "RemoveContainer" containerID="aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4" Dec 09 15:49:45 crc kubenswrapper[4716]: E1209 15:49:45.738156 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4\": container with ID starting with aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4 not found: ID does not exist" containerID="aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.738252 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4"} err="failed to get container status \"aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4\": rpc error: code = NotFound desc = could not find container \"aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4\": container with ID starting with aaaf08acd1620ff8c2b07cc4f0f49cae23735e055e9551eb96c5945b9fa3f8d4 not found: ID does not exist" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.738290 4716 scope.go:117] "RemoveContainer" containerID="f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae" Dec 09 15:49:45 crc kubenswrapper[4716]: E1209 15:49:45.739074 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae\": container with ID starting with f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae not found: ID does not exist" containerID="f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.739111 4716 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae"} err="failed to get container status \"f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae\": rpc error: code = NotFound desc = could not find container \"f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae\": container with ID starting with f861d85dadd4ca00a9e8e2cd8341118ab9aa39f80adbf41555c78dd510548cae not found: ID does not exist" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.739129 4716 scope.go:117] "RemoveContainer" containerID="c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855" Dec 09 15:49:45 crc kubenswrapper[4716]: E1209 15:49:45.739451 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855\": container with ID starting with c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855 not found: ID does not exist" containerID="c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855" Dec 09 15:49:45 crc kubenswrapper[4716]: I1209 15:49:45.739481 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855"} err="failed to get container status \"c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855\": rpc error: code = NotFound desc = could not find container \"c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855\": container with ID starting with c08a6f2d8e9bbda4e175bf56479eee7a75790052d0c2db91378bd07f91a85855 not found: ID does not exist" Dec 09 15:49:46 crc kubenswrapper[4716]: E1209 15:49:46.215631 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:49:47 crc kubenswrapper[4716]: E1209 15:49:47.215795 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:49:47 crc kubenswrapper[4716]: I1209 15:49:47.229944 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" path="/var/lib/kubelet/pods/ef94fdf4-dd68-4fe3-b935-b68c34d7814d/volumes" Dec 09 15:49:53 crc kubenswrapper[4716]: I1209 15:49:53.221889 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:49:53 crc kubenswrapper[4716]: E1209 15:49:53.222766 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:49:58 crc kubenswrapper[4716]: E1209 15:49:58.215700 
4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:50:00 crc kubenswrapper[4716]: E1209 15:50:00.216724 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:50:06 crc kubenswrapper[4716]: I1209 15:50:06.214150 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:50:06 crc kubenswrapper[4716]: E1209 15:50:06.215145 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:50:10 crc kubenswrapper[4716]: E1209 15:50:10.216481 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:50:13 crc kubenswrapper[4716]: E1209 15:50:13.226002 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:50:20 crc kubenswrapper[4716]: I1209 15:50:20.213287 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:50:20 crc kubenswrapper[4716]: E1209 15:50:20.214131 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:50:22 crc kubenswrapper[4716]: E1209 15:50:22.216481 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:50:24 crc kubenswrapper[4716]: E1209 15:50:24.220848 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.157290 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q7j7r"] Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.159526 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="extract-utilities" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.159650 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="extract-utilities" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.159726 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="extract-utilities" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.159797 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="extract-utilities" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.159901 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="extract-content" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.160000 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="extract-content" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.160092 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="extract-content" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.160161 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="extract-content" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.160241 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="registry-server" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.160299 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="registry-server" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.160374 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="registry-server" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.160434 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="registry-server" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.160905 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef94fdf4-dd68-4fe3-b935-b68c34d7814d" containerName="registry-server" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.161013 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c46b892-915c-46a6-97ee-6efecb5c5b9e" containerName="registry-server" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.163027 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.172327 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7j7r"] Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.215327 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.215656 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:50:35 crc kubenswrapper[4716]: E1209 15:50:35.217224 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.223967 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-catalog-content\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.224015 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mktnv\" (UniqueName: \"kubernetes.io/projected/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-kube-api-access-mktnv\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.224127 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-utilities\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.326320 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-utilities\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.326782 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-utilities\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.326813 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-catalog-content\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.326853 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mktnv\" (UniqueName: \"kubernetes.io/projected/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-kube-api-access-mktnv\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.327359 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-catalog-content\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.357252 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mktnv\" (UniqueName: \"kubernetes.io/projected/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-kube-api-access-mktnv\") pod \"community-operators-q7j7r\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:35 crc kubenswrapper[4716]: I1209 15:50:35.494353 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:36 crc kubenswrapper[4716]: I1209 15:50:36.065442 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7j7r"] Dec 09 15:50:36 crc kubenswrapper[4716]: W1209 15:50:36.068944 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cd3c3a2_5aaf_4ab9_9330_24a2e5850b70.slice/crio-c5ce95760e3de6a0cc8fdac68994d78ae7f687d6efbc8015980eacf0f58844c2 WatchSource:0}: Error finding container c5ce95760e3de6a0cc8fdac68994d78ae7f687d6efbc8015980eacf0f58844c2: Status 404 returned error can't find the container with id c5ce95760e3de6a0cc8fdac68994d78ae7f687d6efbc8015980eacf0f58844c2 Dec 09 15:50:36 crc kubenswrapper[4716]: I1209 15:50:36.326422 4716 generic.go:334] "Generic (PLEG): container finished" podID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerID="de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d" exitCode=0 Dec 09 15:50:36 crc kubenswrapper[4716]: I1209 15:50:36.326500 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7j7r" event={"ID":"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70","Type":"ContainerDied","Data":"de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d"} Dec 09 15:50:36 crc kubenswrapper[4716]: I1209 15:50:36.326870 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7j7r" event={"ID":"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70","Type":"ContainerStarted","Data":"c5ce95760e3de6a0cc8fdac68994d78ae7f687d6efbc8015980eacf0f58844c2"} Dec 09 15:50:37 crc kubenswrapper[4716]: E1209 15:50:37.267476 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:50:38 crc kubenswrapper[4716]: I1209 15:50:38.370033 4716 generic.go:334] "Generic (PLEG): container finished" podID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerID="5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a" exitCode=0 Dec 09 15:50:38 crc kubenswrapper[4716]: I1209 15:50:38.370116 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7j7r" event={"ID":"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70","Type":"ContainerDied","Data":"5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a"} Dec 09 15:50:39 crc kubenswrapper[4716]: I1209 15:50:39.388278 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7j7r" event={"ID":"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70","Type":"ContainerStarted","Data":"cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398"} Dec 09 15:50:39 crc kubenswrapper[4716]: I1209 15:50:39.416728 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q7j7r" podStartSLOduration=1.86227857 podStartE2EDuration="4.41670679s" podCreationTimestamp="2025-12-09 15:50:35 +0000 UTC" firstStartedPulling="2025-12-09 15:50:36.328685104 +0000 UTC m=+2523.483429102" lastFinishedPulling="2025-12-09 15:50:38.883113334 +0000 UTC m=+2526.037857322" observedRunningTime="2025-12-09 15:50:39.406918366 +0000 UTC m=+2526.561662364" watchObservedRunningTime="2025-12-09 15:50:39.41670679 +0000 UTC m=+2526.571450778" Dec 09 15:50:45 crc kubenswrapper[4716]: I1209 15:50:45.495756 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:45 crc kubenswrapper[4716]: I1209 15:50:45.496419 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:45 crc kubenswrapper[4716]: I1209 15:50:45.545158 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:46 crc kubenswrapper[4716]: I1209 15:50:46.213864 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:50:46 crc kubenswrapper[4716]: E1209 15:50:46.214315 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:50:46 crc kubenswrapper[4716]: I1209 15:50:46.503012 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:46 crc kubenswrapper[4716]: I1209 15:50:46.641261 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7j7r"] Dec 09 15:50:47 crc kubenswrapper[4716]: E1209 15:50:47.216953 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:50:48 crc kubenswrapper[4716]: I1209 15:50:48.473376 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q7j7r" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="registry-server" containerID="cri-o://cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398" gracePeriod=2 Dec 09 15:50:49 crc kubenswrapper[4716]: E1209 15:50:49.217562 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.457382 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.489318 4716 generic.go:334] "Generic (PLEG): container finished" podID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerID="cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398" exitCode=0 Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.489427 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7j7r" event={"ID":"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70","Type":"ContainerDied","Data":"cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398"} Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.489465 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7j7r" event={"ID":"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70","Type":"ContainerDied","Data":"c5ce95760e3de6a0cc8fdac68994d78ae7f687d6efbc8015980eacf0f58844c2"} Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.489489 4716 scope.go:117] "RemoveContainer" containerID="cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.489704 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q7j7r" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.508816 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-catalog-content\") pod \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.509164 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-utilities\") pod \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.509225 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mktnv\" (UniqueName: \"kubernetes.io/projected/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-kube-api-access-mktnv\") pod \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\" (UID: \"5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70\") " Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.523760 4716 scope.go:117] "RemoveContainer" containerID="5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.526307 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-utilities" (OuterVolumeSpecName: "utilities") pod "5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" (UID: "5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.533561 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-kube-api-access-mktnv" (OuterVolumeSpecName: "kube-api-access-mktnv") pod "5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" (UID: "5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70"). InnerVolumeSpecName "kube-api-access-mktnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.594969 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" (UID: "5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.600977 4716 scope.go:117] "RemoveContainer" containerID="de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.612160 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.612190 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.612205 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mktnv\" (UniqueName: \"kubernetes.io/projected/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70-kube-api-access-mktnv\") on node \"crc\" DevicePath \"\"" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.655240 4716 scope.go:117] "RemoveContainer" containerID="cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398" Dec 09 15:50:49 crc kubenswrapper[4716]: E1209 15:50:49.655922 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398\": container with ID starting with cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398 not found: ID does not exist" containerID="cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.655969 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398"} err="failed to get container status \"cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398\": rpc error: code = NotFound desc = could not find container \"cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398\": container with ID starting with cb3e19de771066b5e78f6023c02f58a2bcc94e60783367f5067dc7490d886398 not found: ID does not exist" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.656002 4716 scope.go:117] "RemoveContainer" containerID="5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a" Dec 09 15:50:49 crc kubenswrapper[4716]: E1209 15:50:49.656570 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a\": container with ID starting with 5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a not found: ID does not exist" containerID="5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.656615 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a"} err="failed to get container status \"5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a\": rpc error: code = NotFound desc = could not find container \"5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a\": container with ID starting with 5faafc54a5bed0cb15836e10ccf0f829168d205224b56b42947c0e68c603163a not found: ID does not exist" Dec 09 15:50:49 crc 
kubenswrapper[4716]: I1209 15:50:49.656763 4716 scope.go:117] "RemoveContainer" containerID="de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d" Dec 09 15:50:49 crc kubenswrapper[4716]: E1209 15:50:49.657634 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d\": container with ID starting with de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d not found: ID does not exist" containerID="de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.657695 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d"} err="failed to get container status \"de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d\": rpc error: code = NotFound desc = could not find container \"de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d\": container with ID starting with de4b33e7aadde4a4e12144a9bd02a90e992dbb252fc08c2f2c4f8e4ee4d0230d not found: ID does not exist" Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.827058 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7j7r"] Dec 09 15:50:49 crc kubenswrapper[4716]: I1209 15:50:49.840396 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q7j7r"] Dec 09 15:50:51 crc kubenswrapper[4716]: I1209 15:50:51.227986 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" path="/var/lib/kubelet/pods/5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70/volumes" Dec 09 15:50:58 crc kubenswrapper[4716]: E1209 15:50:58.216094 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:51:00 crc kubenswrapper[4716]: E1209 15:51:00.216992 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:51:01 crc kubenswrapper[4716]: I1209 15:51:01.214671 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:51:01 crc kubenswrapper[4716]: E1209 15:51:01.215540 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:51:13 crc kubenswrapper[4716]: E1209 15:51:13.240026 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:51:13 crc kubenswrapper[4716]: E1209 15:51:13.241068 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:51:16 crc kubenswrapper[4716]: I1209 15:51:16.214999 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:51:16 crc kubenswrapper[4716]: E1209 15:51:16.215596 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:51:24 crc kubenswrapper[4716]: E1209 15:51:24.216097 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:51:24 crc kubenswrapper[4716]: E1209 15:51:24.216121 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:51:29 crc kubenswrapper[4716]: I1209 15:51:29.214773 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:51:29 crc kubenswrapper[4716]: E1209 15:51:29.215514 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:51:35 crc kubenswrapper[4716]: E1209 15:51:35.218285 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:51:35 crc kubenswrapper[4716]: E1209 15:51:35.218754 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:51:44 crc 
kubenswrapper[4716]: I1209 15:51:44.213278 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:51:44 crc kubenswrapper[4716]: E1209 15:51:44.214154 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:51:46 crc kubenswrapper[4716]: E1209 15:51:46.216754 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:51:49 crc kubenswrapper[4716]: E1209 15:51:49.217113 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:51:55 crc kubenswrapper[4716]: I1209 15:51:55.214412 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:51:55 crc kubenswrapper[4716]: E1209 15:51:55.215727 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:52:00 crc kubenswrapper[4716]: E1209 15:52:00.220376 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:52:02 crc kubenswrapper[4716]: E1209 15:52:02.216208 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:52:07 crc kubenswrapper[4716]: I1209 15:52:07.214069 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:52:07 crc kubenswrapper[4716]: E1209 15:52:07.215097 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:52:12 crc kubenswrapper[4716]: E1209 15:52:12.215255 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:52:14 crc kubenswrapper[4716]: E1209 15:52:14.215297 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:52:21 crc kubenswrapper[4716]: I1209 15:52:21.214195 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:52:21 crc kubenswrapper[4716]: E1209 15:52:21.215132 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:52:27 crc kubenswrapper[4716]: E1209 15:52:27.216426 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:52:29 crc kubenswrapper[4716]: E1209 15:52:29.216583 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:52:34 crc kubenswrapper[4716]: I1209 15:52:34.214110 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:52:34 crc kubenswrapper[4716]: E1209 15:52:34.214983 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:52:39 crc kubenswrapper[4716]: E1209 15:52:39.216681 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:52:40 crc kubenswrapper[4716]: E1209 15:52:40.215543 4716 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:52:46 crc kubenswrapper[4716]: I1209 15:52:46.214001 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:52:46 crc kubenswrapper[4716]: E1209 15:52:46.214893 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:52:50 crc kubenswrapper[4716]: E1209 15:52:50.215303 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:52:55 crc kubenswrapper[4716]: E1209 15:52:55.218219 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:52:57 crc kubenswrapper[4716]: I1209 15:52:57.215217 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:52:57 crc kubenswrapper[4716]: E1209 15:52:57.215788 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:53:04 crc kubenswrapper[4716]: E1209 15:53:04.215295 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:53:09 crc kubenswrapper[4716]: E1209 15:53:09.216577 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:53:10 crc kubenswrapper[4716]: I1209 15:53:10.214668 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:53:10 crc kubenswrapper[4716]: E1209 15:53:10.215762 4716 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 15:53:15 crc kubenswrapper[4716]: I1209 15:53:15.215686 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:53:15 crc kubenswrapper[4716]: E1209 15:53:15.349372 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:53:15 crc kubenswrapper[4716]: E1209 15:53:15.349448 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:53:15 crc kubenswrapper[4716]: E1209 15:53:15.349604 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 15:53:15 crc kubenswrapper[4716]: E1209 15:53:15.350973 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:53:21 crc kubenswrapper[4716]: I1209 15:53:21.214892 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"
Dec 09 15:53:21 crc kubenswrapper[4716]: E1209 15:53:21.215782 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 15:53:24 crc kubenswrapper[4716]: E1209 15:53:24.346424 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 15:53:24 crc kubenswrapper[4716]: E1209 15:53:24.346917 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 15:53:24 crc kubenswrapper[4716]: E1209 15:53:24.347124 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 15:53:24 crc kubenswrapper[4716]: E1209 15:53:24.348383 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:53:30 crc kubenswrapper[4716]: E1209 15:53:30.216457 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:53:36 crc kubenswrapper[4716]: I1209 15:53:36.213668 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"
Dec 09 15:53:36 crc kubenswrapper[4716]: E1209 15:53:36.214525 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 15:53:37 crc kubenswrapper[4716]: E1209 15:53:37.216209 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:53:43 crc kubenswrapper[4716]: E1209 15:53:43.225367 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:53:48 crc kubenswrapper[4716]: E1209 15:53:48.217094 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:53:51 crc kubenswrapper[4716]: I1209 15:53:51.213512 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1"
Dec 09 15:53:52 crc kubenswrapper[4716]: I1209 15:53:52.496044 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4"}
Dec 09 15:53:58 crc kubenswrapper[4716]: E1209 15:53:58.216656 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:54:02 crc kubenswrapper[4716]: E1209 15:54:02.217574 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
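
[editor's note] The ErrImagePull entries above (actual registry pulls at 15:53:15 and 15:53:24) are followed only by ImagePullBackOff "Error syncing pod" entries until the next real pulls at 15:58:21 and 15:58:30, roughly five minutes later. A minimal sketch of that exponential pull back-off, assuming kubelet's usual parameters (10s initial delay, doubling, 300s cap); the exact values are kubelet-internal and not taken from this log:

from datetime import datetime, timedelta

def backoff_schedule(first_attempt, base=10.0, factor=2.0, cap=300.0, n=8):
    """Yield the earliest times at which pull attempts 1..n would be retried."""
    t, delay = first_attempt, base
    for _ in range(n):
        t = t + timedelta(seconds=delay)
        yield t, delay
        delay = min(delay * factor, cap)

# The ceilometer-central pull fails at 15:53:24 and is not retried against the
# registry until 15:58:30 (~5 minutes), consistent with a back-off already at
# its cap; the intervening ImagePullBackOff entries are sync-loop re-checks,
# not registry pulls.
for when, delay in backoff_schedule(datetime(2025, 12, 9, 15, 53, 24)):
    print(f"retry no earlier than {when:%H:%M:%S} (delay {delay:.0f}s)")
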
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:54:09 crc kubenswrapper[4716]: E1209 15:54:09.216220 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:54:15 crc kubenswrapper[4716]: E1209 15:54:15.216598 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:54:24 crc kubenswrapper[4716]: E1209 15:54:24.216407 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:54:26 crc kubenswrapper[4716]: E1209 15:54:26.216572 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:54:36 crc kubenswrapper[4716]: E1209 15:54:36.216833 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:54:39 crc kubenswrapper[4716]: E1209 15:54:39.216474 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:54:48 crc kubenswrapper[4716]: E1209 15:54:48.217388 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:54:54 crc kubenswrapper[4716]: E1209 15:54:54.216882 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:54:59 crc kubenswrapper[4716]: E1209 15:54:59.217606 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:55:05 crc kubenswrapper[4716]: E1209 15:55:05.216959 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:55:13 crc kubenswrapper[4716]: E1209 15:55:13.222056 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:55:19 crc kubenswrapper[4716]: E1209 15:55:19.216807 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:55:28 crc kubenswrapper[4716]: E1209 15:55:28.219505 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:55:30 crc kubenswrapper[4716]: E1209 15:55:30.223426 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:55:40 crc kubenswrapper[4716]: E1209 15:55:40.215584 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:55:41 crc kubenswrapper[4716]: E1209 15:55:41.218337 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:55:43 crc kubenswrapper[4716]: I1209 15:55:43.690257 4716 generic.go:334] "Generic (PLEG): container finished" podID="7a560de6-cdd4-41b4-af8c-0523cca3eed0" containerID="45ee347511c22be7e2723fb43329dbf963649b6554d1cfa6ac848047bcd8ec7e" exitCode=2 Dec 09 15:55:43 crc kubenswrapper[4716]: I1209 15:55:43.690335 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" 
event={"ID":"7a560de6-cdd4-41b4-af8c-0523cca3eed0","Type":"ContainerDied","Data":"45ee347511c22be7e2723fb43329dbf963649b6554d1cfa6ac848047bcd8ec7e"} Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.172702 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.256707 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-inventory\") pod \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.256987 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkq9f\" (UniqueName: \"kubernetes.io/projected/7a560de6-cdd4-41b4-af8c-0523cca3eed0-kube-api-access-kkq9f\") pod \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.257226 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-ssh-key\") pod \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\" (UID: \"7a560de6-cdd4-41b4-af8c-0523cca3eed0\") " Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.263869 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a560de6-cdd4-41b4-af8c-0523cca3eed0-kube-api-access-kkq9f" (OuterVolumeSpecName: "kube-api-access-kkq9f") pod "7a560de6-cdd4-41b4-af8c-0523cca3eed0" (UID: "7a560de6-cdd4-41b4-af8c-0523cca3eed0"). InnerVolumeSpecName "kube-api-access-kkq9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.288815 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7a560de6-cdd4-41b4-af8c-0523cca3eed0" (UID: "7a560de6-cdd4-41b4-af8c-0523cca3eed0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.292486 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-inventory" (OuterVolumeSpecName: "inventory") pod "7a560de6-cdd4-41b4-af8c-0523cca3eed0" (UID: "7a560de6-cdd4-41b4-af8c-0523cca3eed0"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.362127 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.362169 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkq9f\" (UniqueName: \"kubernetes.io/projected/7a560de6-cdd4-41b4-af8c-0523cca3eed0-kube-api-access-kkq9f\") on node \"crc\" DevicePath \"\"" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.362185 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7a560de6-cdd4-41b4-af8c-0523cca3eed0-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.713447 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" event={"ID":"7a560de6-cdd4-41b4-af8c-0523cca3eed0","Type":"ContainerDied","Data":"bdf6000e85838b7163c7f5e3c24a2b4c3cbe005650077ab484dfa040a5f4368a"} Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.713496 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bdf6000e85838b7163c7f5e3c24a2b4c3cbe005650077ab484dfa040a5f4368a" Dec 09 15:55:45 crc kubenswrapper[4716]: I1209 15:55:45.713511 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.690516 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l6hh8"] Dec 09 15:55:52 crc kubenswrapper[4716]: E1209 15:55:52.691711 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="extract-content" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.691733 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="extract-content" Dec 09 15:55:52 crc kubenswrapper[4716]: E1209 15:55:52.691749 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a560de6-cdd4-41b4-af8c-0523cca3eed0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.691757 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a560de6-cdd4-41b4-af8c-0523cca3eed0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 15:55:52 crc kubenswrapper[4716]: E1209 15:55:52.691784 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="extract-utilities" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.691790 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="extract-utilities" Dec 09 15:55:52 crc kubenswrapper[4716]: E1209 15:55:52.691821 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="registry-server" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.691827 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="registry-server" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.692089 4716 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="7a560de6-cdd4-41b4-af8c-0523cca3eed0" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.692113 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cd3c3a2-5aaf-4ab9-9330-24a2e5850b70" containerName="registry-server" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.694024 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.704648 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6hh8"] Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.724312 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-catalog-content\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.724443 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-utilities\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.724582 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px4g9\" (UniqueName: \"kubernetes.io/projected/4ccedd6c-8a0d-49ca-b2a4-59697f842648-kube-api-access-px4g9\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.825875 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px4g9\" (UniqueName: \"kubernetes.io/projected/4ccedd6c-8a0d-49ca-b2a4-59697f842648-kube-api-access-px4g9\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.826128 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-catalog-content\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.826266 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-utilities\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.826867 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-catalog-content\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc 
kubenswrapper[4716]: I1209 15:55:52.826917 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-utilities\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:52 crc kubenswrapper[4716]: I1209 15:55:52.997862 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px4g9\" (UniqueName: \"kubernetes.io/projected/4ccedd6c-8a0d-49ca-b2a4-59697f842648-kube-api-access-px4g9\") pod \"redhat-marketplace-l6hh8\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") " pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:53 crc kubenswrapper[4716]: I1209 15:55:53.024907 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:55:53 crc kubenswrapper[4716]: E1209 15:55:53.233378 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:55:53 crc kubenswrapper[4716]: I1209 15:55:53.530221 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6hh8"] Dec 09 15:55:53 crc kubenswrapper[4716]: I1209 15:55:53.817678 4716 generic.go:334] "Generic (PLEG): container finished" podID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerID="f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b" exitCode=0 Dec 09 15:55:53 crc kubenswrapper[4716]: I1209 15:55:53.817731 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerDied","Data":"f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b"} Dec 09 15:55:53 crc kubenswrapper[4716]: I1209 15:55:53.817970 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerStarted","Data":"0928e2a38c65359eafabefb22430a95139046c3529e7b23f35f796470433bee1"} Dec 09 15:55:54 crc kubenswrapper[4716]: E1209 15:55:54.219317 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:55:54 crc kubenswrapper[4716]: I1209 15:55:54.836064 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerStarted","Data":"bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d"} Dec 09 15:55:56 crc kubenswrapper[4716]: I1209 15:55:56.071165 4716 generic.go:334] "Generic (PLEG): container finished" podID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerID="bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d" exitCode=0 Dec 09 15:55:56 crc kubenswrapper[4716]: I1209 15:55:56.071235 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerDied","Data":"bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d"} Dec 09 15:55:57 crc kubenswrapper[4716]: I1209 15:55:57.086890 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerStarted","Data":"c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd"} Dec 09 15:55:57 crc kubenswrapper[4716]: I1209 15:55:57.110854 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l6hh8" podStartSLOduration=2.271044056 podStartE2EDuration="5.110772264s" podCreationTimestamp="2025-12-09 15:55:52 +0000 UTC" firstStartedPulling="2025-12-09 15:55:53.819285301 +0000 UTC m=+2840.974029289" lastFinishedPulling="2025-12-09 15:55:56.659013509 +0000 UTC m=+2843.813757497" observedRunningTime="2025-12-09 15:55:57.10181242 +0000 UTC m=+2844.256556418" watchObservedRunningTime="2025-12-09 15:55:57.110772264 +0000 UTC m=+2844.265516242" Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.026077 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.026869 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.038044 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"] Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.040668 4716 util.go:30] "No sandbox for pod can be found. 
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.043192 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.046454 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.047177 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.341006 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.414245 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"]
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.435805 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l6hh8"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.472097 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssvzx\" (UniqueName: \"kubernetes.io/projected/9b993fbb-2885-48c8-83b7-9e07287e790e-kube-api-access-ssvzx\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.472757 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.473478 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.486615 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l6hh8"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.575805 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.576035 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.576108 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssvzx\" (UniqueName: \"kubernetes.io/projected/9b993fbb-2885-48c8-83b7-9e07287e790e-kube-api-access-ssvzx\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.582297 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.582538 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.746013 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssvzx\" (UniqueName: \"kubernetes.io/projected/9b993fbb-2885-48c8-83b7-9e07287e790e-kube-api-access-ssvzx\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:03 crc kubenswrapper[4716]: I1209 15:56:03.753004 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6hh8"]
Dec 09 15:56:04 crc kubenswrapper[4716]: I1209 15:56:04.014730 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 15:56:04 crc kubenswrapper[4716]: I1209 15:56:04.618176 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"]
Dec 09 15:56:04 crc kubenswrapper[4716]: W1209 15:56:04.621245 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b993fbb_2885_48c8_83b7_9e07287e790e.slice/crio-0b0adf113e85b29d93e776f4e5e992b33c51d97fab24b6be860a95bdb4e9c10c WatchSource:0}: Error finding container 0b0adf113e85b29d93e776f4e5e992b33c51d97fab24b6be860a95bdb4e9c10c: Status 404 returned error can't find the container with id 0b0adf113e85b29d93e776f4e5e992b33c51d97fab24b6be860a95bdb4e9c10c
Dec 09 15:56:05 crc kubenswrapper[4716]: I1209 15:56:05.398492 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l" event={"ID":"9b993fbb-2885-48c8-83b7-9e07287e790e","Type":"ContainerStarted","Data":"0b0adf113e85b29d93e776f4e5e992b33c51d97fab24b6be860a95bdb4e9c10c"}
Dec 09 15:56:05 crc kubenswrapper[4716]: I1209 15:56:05.398752 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l6hh8" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="registry-server" containerID="cri-o://c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd" gracePeriod=2
Dec 09 15:56:05 crc kubenswrapper[4716]: I1209 15:56:05.916529 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6hh8"
Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.089984 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px4g9\" (UniqueName: \"kubernetes.io/projected/4ccedd6c-8a0d-49ca-b2a4-59697f842648-kube-api-access-px4g9\") pod \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") "
Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.090227 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-utilities\") pod \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") "
Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.090700 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-catalog-content\") pod \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\" (UID: \"4ccedd6c-8a0d-49ca-b2a4-59697f842648\") "
Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.091673 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-utilities" (OuterVolumeSpecName: "utilities") pod "4ccedd6c-8a0d-49ca-b2a4-59697f842648" (UID: "4ccedd6c-8a0d-49ca-b2a4-59697f842648"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
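
[editor's note] The "SyncLoop (PLEG): event for pod" entries above carry a JSON payload after "event=", so per-pod container history (ContainerStarted/ContainerDied) can be reconstructed mechanically. A minimal sketch; the regex is illustrative and matched only to the format shown in these lines:

import json
import re
from collections import defaultdict

# The payload printed after event= is a flat JSON object with no nested braces,
# so a non-greedy brace match is enough for these lines.
PLEG = re.compile(r'pod="(?P<pod>[^"]+)" event=(?P<ev>\{.*?\})')

def container_history(lines):
    """Map pod -> ordered list of (event type, container/sandbox ID)."""
    history = defaultdict(list)
    for line in lines:
        m = PLEG.search(line)
        if m:
            ev = json.loads(m.group("ev"))
            history[m.group("pod")].append((ev["Type"], ev["Data"]))
    return dict(history)
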
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.098574 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ccedd6c-8a0d-49ca-b2a4-59697f842648-kube-api-access-px4g9" (OuterVolumeSpecName: "kube-api-access-px4g9") pod "4ccedd6c-8a0d-49ca-b2a4-59697f842648" (UID: "4ccedd6c-8a0d-49ca-b2a4-59697f842648"). InnerVolumeSpecName "kube-api-access-px4g9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.115074 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ccedd6c-8a0d-49ca-b2a4-59697f842648" (UID: "4ccedd6c-8a0d-49ca-b2a4-59697f842648"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.194556 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.194603 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ccedd6c-8a0d-49ca-b2a4-59697f842648-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.194643 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px4g9\" (UniqueName: \"kubernetes.io/projected/4ccedd6c-8a0d-49ca-b2a4-59697f842648-kube-api-access-px4g9\") on node \"crc\" DevicePath \"\"" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.413570 4716 generic.go:334] "Generic (PLEG): container finished" podID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerID="c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd" exitCode=0 Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.413654 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerDied","Data":"c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd"} Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.413702 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l6hh8" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.414122 4716 scope.go:117] "RemoveContainer" containerID="c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.414066 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l6hh8" event={"ID":"4ccedd6c-8a0d-49ca-b2a4-59697f842648","Type":"ContainerDied","Data":"0928e2a38c65359eafabefb22430a95139046c3529e7b23f35f796470433bee1"} Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.417808 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l" event={"ID":"9b993fbb-2885-48c8-83b7-9e07287e790e","Type":"ContainerStarted","Data":"d4c711377723caac7497e0dd7c8e056dc04c1a0ba08a02b3720c3f72977f0877"} Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.805761 4716 scope.go:117] "RemoveContainer" containerID="bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.817486 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l" podStartSLOduration=2.885697858 podStartE2EDuration="3.817460878s" podCreationTimestamp="2025-12-09 15:56:03 +0000 UTC" firstStartedPulling="2025-12-09 15:56:04.627455966 +0000 UTC m=+2851.782199954" lastFinishedPulling="2025-12-09 15:56:05.559218986 +0000 UTC m=+2852.713962974" observedRunningTime="2025-12-09 15:56:06.800487777 +0000 UTC m=+2853.955231765" watchObservedRunningTime="2025-12-09 15:56:06.817460878 +0000 UTC m=+2853.972204866" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.850505 4716 scope.go:117] "RemoveContainer" containerID="f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.854108 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6hh8"] Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.865781 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l6hh8"] Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.901744 4716 scope.go:117] "RemoveContainer" containerID="c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd" Dec 09 15:56:06 crc kubenswrapper[4716]: E1209 15:56:06.902319 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd\": container with ID starting with c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd not found: ID does not exist" containerID="c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.902469 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd"} err="failed to get container status \"c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd\": rpc error: code = NotFound desc = could not find container \"c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd\": container with ID starting with c8fa6da2d910f9c4a67fd0671c910d568c4f5df03523748a2e3d14aa65989acd not found: ID does not exist" Dec 09 15:56:06 crc kubenswrapper[4716]: 
I1209 15:56:06.902574 4716 scope.go:117] "RemoveContainer" containerID="bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d" Dec 09 15:56:06 crc kubenswrapper[4716]: E1209 15:56:06.903340 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d\": container with ID starting with bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d not found: ID does not exist" containerID="bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.903379 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d"} err="failed to get container status \"bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d\": rpc error: code = NotFound desc = could not find container \"bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d\": container with ID starting with bc1c942a9ac3d42001b55c8b3461509c664d5c7d120cc4e41ab312324075978d not found: ID does not exist" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.903497 4716 scope.go:117] "RemoveContainer" containerID="f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b" Dec 09 15:56:06 crc kubenswrapper[4716]: E1209 15:56:06.903859 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b\": container with ID starting with f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b not found: ID does not exist" containerID="f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b" Dec 09 15:56:06 crc kubenswrapper[4716]: I1209 15:56:06.903954 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b"} err="failed to get container status \"f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b\": rpc error: code = NotFound desc = could not find container \"f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b\": container with ID starting with f6329e8898dcf8975ce6b4d217193fd3b004d0922542c804dc9c893ee5c0339b not found: ID does not exist" Dec 09 15:56:07 crc kubenswrapper[4716]: E1209 15:56:07.217484 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:56:07 crc kubenswrapper[4716]: I1209 15:56:07.231457 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" path="/var/lib/kubelet/pods/4ccedd6c-8a0d-49ca-b2a4-59697f842648/volumes" Dec 09 15:56:08 crc kubenswrapper[4716]: E1209 15:56:08.216617 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:56:17 crc kubenswrapper[4716]: I1209 15:56:17.921899 
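
[editor's note] The RemoveContainer / "ContainerStatus from runtime service failed" / "DeleteContainer returned error" trio above repeats once per catalog container; a NotFound status is logged but effectively treated as already-deleted. A minimal sketch of that idempotent-delete pattern; NotFoundError and the runtime stub are illustrative stand-ins, not kubelet or CRI API:

class NotFoundError(Exception):
    """Stand-in for the runtime's NotFound status (gRPC code in the real log)."""

def remove_container(runtime, container_id: str) -> None:
    try:
        runtime.container_status(container_id)  # mirrors "ContainerStatus from runtime service"
    except NotFoundError:
        # "could not find container ... ID does not exist": nothing left to
        # delete, so cleanup is logged as an error but treated as a no-op.
        print(f"DeleteContainer returned error for {container_id[:12]}: already gone")
        return
    runtime.remove_container(container_id)
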
Dec 09 15:56:17 crc kubenswrapper[4716]: I1209 15:56:17.922512 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:56:20 crc kubenswrapper[4716]: E1209 15:56:20.216990 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:56:20 crc kubenswrapper[4716]: E1209 15:56:20.216990 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:56:31 crc kubenswrapper[4716]: E1209 15:56:31.217134 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:56:33 crc kubenswrapper[4716]: E1209 15:56:33.257755 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:56:44 crc kubenswrapper[4716]: E1209 15:56:44.217866 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:56:45 crc kubenswrapper[4716]: E1209 15:56:45.215521 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:56:47 crc kubenswrapper[4716]: I1209 15:56:47.921951 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:56:47 crc kubenswrapper[4716]: I1209 15:56:47.922786 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:56:55 crc kubenswrapper[4716]: E1209 15:56:55.216729 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:56:57 crc kubenswrapper[4716]: E1209 15:56:57.216484 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:57:07 crc kubenswrapper[4716]: E1209 15:57:07.219340 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 15:57:11 crc kubenswrapper[4716]: E1209 15:57:11.223433 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 15:57:17 crc kubenswrapper[4716]: I1209 15:57:17.922211 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 15:57:17 crc kubenswrapper[4716]: I1209 15:57:17.922994 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 15:57:17 crc kubenswrapper[4716]: I1209 15:57:17.923037 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2"
Dec 09 15:57:17 crc kubenswrapper[4716]: I1209 15:57:17.924131 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 09 15:57:17 crc kubenswrapper[4716]: I1209 15:57:17.924181 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4" gracePeriod=600
containerName="machine-config-daemon" containerID="cri-o://c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4" gracePeriod=600 Dec 09 15:57:18 crc kubenswrapper[4716]: E1209 15:57:18.217213 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:57:18 crc kubenswrapper[4716]: I1209 15:57:18.938235 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4" exitCode=0 Dec 09 15:57:18 crc kubenswrapper[4716]: I1209 15:57:18.938322 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4"} Dec 09 15:57:18 crc kubenswrapper[4716]: I1209 15:57:18.938908 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"} Dec 09 15:57:18 crc kubenswrapper[4716]: I1209 15:57:18.938965 4716 scope.go:117] "RemoveContainer" containerID="dd82e2eedfe1213cad4ba613e7cdac44ed5c5060200b5e4fcfb856fc8f876bf1" Dec 09 15:57:24 crc kubenswrapper[4716]: E1209 15:57:24.216936 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:57:31 crc kubenswrapper[4716]: E1209 15:57:31.218579 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:57:36 crc kubenswrapper[4716]: E1209 15:57:36.216418 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:57:42 crc kubenswrapper[4716]: E1209 15:57:42.216056 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:57:51 crc kubenswrapper[4716]: E1209 15:57:51.216184 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:57:53 crc kubenswrapper[4716]: E1209 15:57:53.223509 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:58:04 crc kubenswrapper[4716]: E1209 15:58:04.217991 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:58:07 crc kubenswrapper[4716]: E1209 15:58:07.216284 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:58:19 crc kubenswrapper[4716]: E1209 15:58:19.217841 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:58:21 crc kubenswrapper[4716]: I1209 15:58:21.215949 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 15:58:21 crc kubenswrapper[4716]: E1209 15:58:21.349690 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:58:21 crc kubenswrapper[4716]: E1209 15:58:21.349768 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 15:58:21 crc kubenswrapper[4716]: E1209 15:58:21.349913 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:58:21 crc kubenswrapper[4716]: E1209 15:58:21.351333 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:58:30 crc kubenswrapper[4716]: E1209 15:58:30.336042 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:58:30 crc kubenswrapper[4716]: E1209 15:58:30.336840 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 15:58:30 crc kubenswrapper[4716]: E1209 15:58:30.337073 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 15:58:30 crc kubenswrapper[4716]: E1209 15:58:30.338269 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:58:34 crc kubenswrapper[4716]: E1209 15:58:34.218086 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:58:44 crc kubenswrapper[4716]: E1209 15:58:44.215814 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:58:45 crc kubenswrapper[4716]: E1209 15:58:45.216644 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:58:59 crc kubenswrapper[4716]: E1209 15:58:59.216839 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:59:00 crc kubenswrapper[4716]: E1209 15:59:00.216014 4716 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:59:11 crc kubenswrapper[4716]: E1209 15:59:11.216566 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:59:14 crc kubenswrapper[4716]: E1209 15:59:14.216204 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:59:23 crc kubenswrapper[4716]: E1209 15:59:23.224202 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:59:26 crc kubenswrapper[4716]: E1209 15:59:26.216039 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:59:35 crc kubenswrapper[4716]: E1209 15:59:35.216088 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.181632 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-v59c7"] Dec 09 15:59:37 crc kubenswrapper[4716]: E1209 15:59:37.182975 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="extract-content" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.183007 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="extract-content" Dec 09 15:59:37 crc kubenswrapper[4716]: E1209 15:59:37.183065 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="registry-server" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.183071 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="registry-server" Dec 09 15:59:37 crc kubenswrapper[4716]: E1209 15:59:37.183094 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="extract-utilities" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.183102 4716 
state_mem.go:107] "Deleted CPUSet assignment" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="extract-utilities" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.183388 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ccedd6c-8a0d-49ca-b2a4-59697f842648" containerName="registry-server" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.185218 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.191541 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v59c7"] Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.225927 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-utilities\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.226103 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n25w\" (UniqueName: \"kubernetes.io/projected/6574926e-4465-4d78-8a20-4c9746937775-kube-api-access-2n25w\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.226215 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-catalog-content\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.328410 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-catalog-content\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.328762 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-utilities\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.328796 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n25w\" (UniqueName: \"kubernetes.io/projected/6574926e-4465-4d78-8a20-4c9746937775-kube-api-access-2n25w\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.328990 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-catalog-content\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc 
kubenswrapper[4716]: I1209 15:59:37.329115 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-utilities\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.356781 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n25w\" (UniqueName: \"kubernetes.io/projected/6574926e-4465-4d78-8a20-4c9746937775-kube-api-access-2n25w\") pod \"certified-operators-v59c7\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:37 crc kubenswrapper[4716]: I1209 15:59:37.506497 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:38 crc kubenswrapper[4716]: W1209 15:59:38.166907 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6574926e_4465_4d78_8a20_4c9746937775.slice/crio-aaa777a7c44783ea57c5a1e2535739859880e220c373d09ed550468b6e621607 WatchSource:0}: Error finding container aaa777a7c44783ea57c5a1e2535739859880e220c373d09ed550468b6e621607: Status 404 returned error can't find the container with id aaa777a7c44783ea57c5a1e2535739859880e220c373d09ed550468b6e621607 Dec 09 15:59:38 crc kubenswrapper[4716]: I1209 15:59:38.170128 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v59c7"] Dec 09 15:59:38 crc kubenswrapper[4716]: I1209 15:59:38.433343 4716 generic.go:334] "Generic (PLEG): container finished" podID="6574926e-4465-4d78-8a20-4c9746937775" containerID="12be5ba6b11cc51b58125ff83896cb5618220b29715eaeca166f7565b10846d7" exitCode=0 Dec 09 15:59:38 crc kubenswrapper[4716]: I1209 15:59:38.433556 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerDied","Data":"12be5ba6b11cc51b58125ff83896cb5618220b29715eaeca166f7565b10846d7"} Dec 09 15:59:38 crc kubenswrapper[4716]: I1209 15:59:38.433809 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerStarted","Data":"aaa777a7c44783ea57c5a1e2535739859880e220c373d09ed550468b6e621607"} Dec 09 15:59:40 crc kubenswrapper[4716]: E1209 15:59:40.215616 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:59:40 crc kubenswrapper[4716]: I1209 15:59:40.546178 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerStarted","Data":"52343c136d2471f471b76dd29f673f9b8c84b9acf51f3ae9e796add9e99513dd"} Dec 09 15:59:41 crc kubenswrapper[4716]: I1209 15:59:41.641413 4716 generic.go:334] "Generic (PLEG): container finished" podID="6574926e-4465-4d78-8a20-4c9746937775" containerID="52343c136d2471f471b76dd29f673f9b8c84b9acf51f3ae9e796add9e99513dd" exitCode=0 
Dec 09 15:59:41 crc kubenswrapper[4716]: I1209 15:59:41.641531 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerDied","Data":"52343c136d2471f471b76dd29f673f9b8c84b9acf51f3ae9e796add9e99513dd"} Dec 09 15:59:41 crc kubenswrapper[4716]: I1209 15:59:41.642084 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerStarted","Data":"3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1"} Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.355844 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v59c7" podStartSLOduration=2.681056489 podStartE2EDuration="5.355808206s" podCreationTimestamp="2025-12-09 15:59:37 +0000 UTC" firstStartedPulling="2025-12-09 15:59:38.435554383 +0000 UTC m=+3065.590298371" lastFinishedPulling="2025-12-09 15:59:41.1103061 +0000 UTC m=+3068.265050088" observedRunningTime="2025-12-09 15:59:41.681139611 +0000 UTC m=+3068.835883599" watchObservedRunningTime="2025-12-09 15:59:42.355808206 +0000 UTC m=+3069.510552194" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.360569 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"] Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.363258 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.373902 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"] Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.536177 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-utilities\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.664136 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-catalog-content\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.664806 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj79z\" (UniqueName: \"kubernetes.io/projected/6e63faed-8d6e-4b22-97be-20cc79795147-kube-api-access-bj79z\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.767302 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj79z\" (UniqueName: \"kubernetes.io/projected/6e63faed-8d6e-4b22-97be-20cc79795147-kube-api-access-bj79z\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.767449 4716 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-utilities\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.767504 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-catalog-content\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.768187 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-catalog-content\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.768485 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-utilities\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.788901 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj79z\" (UniqueName: \"kubernetes.io/projected/6e63faed-8d6e-4b22-97be-20cc79795147-kube-api-access-bj79z\") pod \"redhat-operators-dtdsg\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") " pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:42 crc kubenswrapper[4716]: I1209 15:59:42.993445 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 15:59:43 crc kubenswrapper[4716]: I1209 15:59:43.516907 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"] Dec 09 15:59:43 crc kubenswrapper[4716]: W1209 15:59:43.527822 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e63faed_8d6e_4b22_97be_20cc79795147.slice/crio-4768241883b59e63b7491ecf14dd21b161a2b9f874ffe4c3803595ebabe9aedb WatchSource:0}: Error finding container 4768241883b59e63b7491ecf14dd21b161a2b9f874ffe4c3803595ebabe9aedb: Status 404 returned error can't find the container with id 4768241883b59e63b7491ecf14dd21b161a2b9f874ffe4c3803595ebabe9aedb Dec 09 15:59:43 crc kubenswrapper[4716]: I1209 15:59:43.967829 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerStarted","Data":"4768241883b59e63b7491ecf14dd21b161a2b9f874ffe4c3803595ebabe9aedb"} Dec 09 15:59:44 crc kubenswrapper[4716]: I1209 15:59:44.979564 4716 generic.go:334] "Generic (PLEG): container finished" podID="6e63faed-8d6e-4b22-97be-20cc79795147" containerID="feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c" exitCode=0 Dec 09 15:59:44 crc kubenswrapper[4716]: I1209 15:59:44.979682 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerDied","Data":"feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c"} Dec 09 15:59:46 crc kubenswrapper[4716]: E1209 15:59:46.216658 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 15:59:47 crc kubenswrapper[4716]: I1209 15:59:47.507754 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:47 crc kubenswrapper[4716]: I1209 15:59:47.508738 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:47 crc kubenswrapper[4716]: I1209 15:59:47.568320 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:47 crc kubenswrapper[4716]: I1209 15:59:47.922043 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 15:59:47 crc kubenswrapper[4716]: I1209 15:59:47.922124 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 15:59:48 crc kubenswrapper[4716]: I1209 15:59:48.323523 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:48 crc kubenswrapper[4716]: I1209 15:59:48.751093 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v59c7"] Dec 09 15:59:50 crc kubenswrapper[4716]: I1209 15:59:50.169385 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-v59c7" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="registry-server" containerID="cri-o://3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1" gracePeriod=2 Dec 09 15:59:51 crc kubenswrapper[4716]: I1209 15:59:51.183510 4716 generic.go:334] "Generic (PLEG): container finished" podID="6574926e-4465-4d78-8a20-4c9746937775" containerID="3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1" exitCode=0 Dec 09 15:59:51 crc kubenswrapper[4716]: I1209 15:59:51.183599 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerDied","Data":"3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1"} Dec 09 15:59:51 crc kubenswrapper[4716]: E1209 15:59:51.215945 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 15:59:57 crc kubenswrapper[4716]: E1209 15:59:57.514024 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1 is running failed: container process not found" containerID="3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 15:59:57 crc kubenswrapper[4716]: E1209 15:59:57.516088 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1 is running failed: container process not found" containerID="3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 15:59:57 crc kubenswrapper[4716]: E1209 15:59:57.516386 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1 is running failed: container process not found" containerID="3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 15:59:57 crc kubenswrapper[4716]: E1209 15:59:57.516415 4716 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-v59c7" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="registry-server" Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.764000 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.916005 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-catalog-content\") pod \"6574926e-4465-4d78-8a20-4c9746937775\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.916269 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n25w\" (UniqueName: \"kubernetes.io/projected/6574926e-4465-4d78-8a20-4c9746937775-kube-api-access-2n25w\") pod \"6574926e-4465-4d78-8a20-4c9746937775\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.916453 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-utilities\") pod \"6574926e-4465-4d78-8a20-4c9746937775\" (UID: \"6574926e-4465-4d78-8a20-4c9746937775\") " Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.917349 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-utilities" (OuterVolumeSpecName: "utilities") pod "6574926e-4465-4d78-8a20-4c9746937775" (UID: "6574926e-4465-4d78-8a20-4c9746937775"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.922091 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6574926e-4465-4d78-8a20-4c9746937775-kube-api-access-2n25w" (OuterVolumeSpecName: "kube-api-access-2n25w") pod "6574926e-4465-4d78-8a20-4c9746937775" (UID: "6574926e-4465-4d78-8a20-4c9746937775"). InnerVolumeSpecName "kube-api-access-2n25w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 15:59:57 crc kubenswrapper[4716]: I1209 15:59:57.950739 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6574926e-4465-4d78-8a20-4c9746937775" (UID: "6574926e-4465-4d78-8a20-4c9746937775"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.019240 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.019277 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n25w\" (UniqueName: \"kubernetes.io/projected/6574926e-4465-4d78-8a20-4c9746937775-kube-api-access-2n25w\") on node \"crc\" DevicePath \"\"" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.019295 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574926e-4465-4d78-8a20-4c9746937775-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.839955 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v59c7" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.839938 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v59c7" event={"ID":"6574926e-4465-4d78-8a20-4c9746937775","Type":"ContainerDied","Data":"aaa777a7c44783ea57c5a1e2535739859880e220c373d09ed550468b6e621607"} Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.841412 4716 scope.go:117] "RemoveContainer" containerID="3db46797c35d0251ea9625e6cde0e8dbe6857cc63ae05b1593652df8c57f60f1" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.844099 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerStarted","Data":"4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0"} Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.902016 4716 scope.go:117] "RemoveContainer" containerID="52343c136d2471f471b76dd29f673f9b8c84b9acf51f3ae9e796add9e99513dd" Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.935505 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v59c7"] Dec 09 15:59:58 crc kubenswrapper[4716]: I1209 15:59:58.945729 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-v59c7"] Dec 09 15:59:59 crc kubenswrapper[4716]: I1209 15:59:59.024569 4716 scope.go:117] "RemoveContainer" containerID="12be5ba6b11cc51b58125ff83896cb5618220b29715eaeca166f7565b10846d7" Dec 09 15:59:59 crc kubenswrapper[4716]: I1209 15:59:59.227688 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6574926e-4465-4d78-8a20-4c9746937775" path="/var/lib/kubelet/pods/6574926e-4465-4d78-8a20-4c9746937775/volumes" Dec 09 16:00:00 crc kubenswrapper[4716]: E1209 16:00:00.398766 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.475585 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm"] Dec 09 16:00:00 crc kubenswrapper[4716]: E1209 16:00:00.476441 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="registry-server" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.476459 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="registry-server" Dec 09 16:00:00 crc kubenswrapper[4716]: E1209 16:00:00.476486 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="extract-utilities" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.476492 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="extract-utilities" Dec 09 16:00:00 crc kubenswrapper[4716]: E1209 16:00:00.476537 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="extract-content" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.476543 4716 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="extract-content" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.476817 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="6574926e-4465-4d78-8a20-4c9746937775" containerName="registry-server" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.477849 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.480014 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.480017 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.500152 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm"] Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.512973 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26e6b88c-3937-4dac-a165-9df99ab47f6b-secret-volume\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.513272 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stvf9\" (UniqueName: \"kubernetes.io/projected/26e6b88c-3937-4dac-a165-9df99ab47f6b-kube-api-access-stvf9\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.513461 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26e6b88c-3937-4dac-a165-9df99ab47f6b-config-volume\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.617366 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26e6b88c-3937-4dac-a165-9df99ab47f6b-secret-volume\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.617645 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stvf9\" (UniqueName: \"kubernetes.io/projected/26e6b88c-3937-4dac-a165-9df99ab47f6b-kube-api-access-stvf9\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.617817 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26e6b88c-3937-4dac-a165-9df99ab47f6b-config-volume\") pod 
\"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.618820 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26e6b88c-3937-4dac-a165-9df99ab47f6b-config-volume\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.626668 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26e6b88c-3937-4dac-a165-9df99ab47f6b-secret-volume\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.641394 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stvf9\" (UniqueName: \"kubernetes.io/projected/26e6b88c-3937-4dac-a165-9df99ab47f6b-kube-api-access-stvf9\") pod \"collect-profiles-29421600-vwwnm\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.800667 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.870135 4716 generic.go:334] "Generic (PLEG): container finished" podID="6e63faed-8d6e-4b22-97be-20cc79795147" containerID="4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0" exitCode=0 Dec 09 16:00:00 crc kubenswrapper[4716]: I1209 16:00:00.870181 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerDied","Data":"4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0"} Dec 09 16:00:01 crc kubenswrapper[4716]: W1209 16:00:01.258473 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26e6b88c_3937_4dac_a165_9df99ab47f6b.slice/crio-0588227ed3c7f807eab1b2adb8f16974884f80c3f10ed65ec6eb74389ebf1ff8 WatchSource:0}: Error finding container 0588227ed3c7f807eab1b2adb8f16974884f80c3f10ed65ec6eb74389ebf1ff8: Status 404 returned error can't find the container with id 0588227ed3c7f807eab1b2adb8f16974884f80c3f10ed65ec6eb74389ebf1ff8 Dec 09 16:00:01 crc kubenswrapper[4716]: I1209 16:00:01.260066 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm"] Dec 09 16:00:01 crc kubenswrapper[4716]: I1209 16:00:01.881661 4716 generic.go:334] "Generic (PLEG): container finished" podID="26e6b88c-3937-4dac-a165-9df99ab47f6b" containerID="0c71982a76d9ab1fcf06031eafb4b29ae75c05ad2a6601a4265f228933599b80" exitCode=0 Dec 09 16:00:01 crc kubenswrapper[4716]: I1209 16:00:01.881792 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" event={"ID":"26e6b88c-3937-4dac-a165-9df99ab47f6b","Type":"ContainerDied","Data":"0c71982a76d9ab1fcf06031eafb4b29ae75c05ad2a6601a4265f228933599b80"} Dec 09 16:00:01 crc 
kubenswrapper[4716]: I1209 16:00:01.881985 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" event={"ID":"26e6b88c-3937-4dac-a165-9df99ab47f6b","Type":"ContainerStarted","Data":"0588227ed3c7f807eab1b2adb8f16974884f80c3f10ed65ec6eb74389ebf1ff8"} Dec 09 16:00:02 crc kubenswrapper[4716]: I1209 16:00:02.975385 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerStarted","Data":"88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75"} Dec 09 16:00:02 crc kubenswrapper[4716]: I1209 16:00:02.993615 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 16:00:02 crc kubenswrapper[4716]: I1209 16:00:02.994338 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.004412 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dtdsg" podStartSLOduration=4.003546461 podStartE2EDuration="21.004391202s" podCreationTimestamp="2025-12-09 15:59:42 +0000 UTC" firstStartedPulling="2025-12-09 15:59:44.982313114 +0000 UTC m=+3072.137057102" lastFinishedPulling="2025-12-09 16:00:01.983157855 +0000 UTC m=+3089.137901843" observedRunningTime="2025-12-09 16:00:02.999476344 +0000 UTC m=+3090.154220342" watchObservedRunningTime="2025-12-09 16:00:03.004391202 +0000 UTC m=+3090.159135190" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.415691 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.582510 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stvf9\" (UniqueName: \"kubernetes.io/projected/26e6b88c-3937-4dac-a165-9df99ab47f6b-kube-api-access-stvf9\") pod \"26e6b88c-3937-4dac-a165-9df99ab47f6b\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.582707 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26e6b88c-3937-4dac-a165-9df99ab47f6b-secret-volume\") pod \"26e6b88c-3937-4dac-a165-9df99ab47f6b\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.582833 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26e6b88c-3937-4dac-a165-9df99ab47f6b-config-volume\") pod \"26e6b88c-3937-4dac-a165-9df99ab47f6b\" (UID: \"26e6b88c-3937-4dac-a165-9df99ab47f6b\") " Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.583403 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26e6b88c-3937-4dac-a165-9df99ab47f6b-config-volume" (OuterVolumeSpecName: "config-volume") pod "26e6b88c-3937-4dac-a165-9df99ab47f6b" (UID: "26e6b88c-3937-4dac-a165-9df99ab47f6b"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.584242 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/26e6b88c-3937-4dac-a165-9df99ab47f6b-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.593965 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26e6b88c-3937-4dac-a165-9df99ab47f6b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "26e6b88c-3937-4dac-a165-9df99ab47f6b" (UID: "26e6b88c-3937-4dac-a165-9df99ab47f6b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.594069 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26e6b88c-3937-4dac-a165-9df99ab47f6b-kube-api-access-stvf9" (OuterVolumeSpecName: "kube-api-access-stvf9") pod "26e6b88c-3937-4dac-a165-9df99ab47f6b" (UID: "26e6b88c-3937-4dac-a165-9df99ab47f6b"). InnerVolumeSpecName "kube-api-access-stvf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.686572 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stvf9\" (UniqueName: \"kubernetes.io/projected/26e6b88c-3937-4dac-a165-9df99ab47f6b-kube-api-access-stvf9\") on node \"crc\" DevicePath \"\"" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.686615 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/26e6b88c-3937-4dac-a165-9df99ab47f6b-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.997614 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.997713 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm" event={"ID":"26e6b88c-3937-4dac-a165-9df99ab47f6b","Type":"ContainerDied","Data":"0588227ed3c7f807eab1b2adb8f16974884f80c3f10ed65ec6eb74389ebf1ff8"} Dec 09 16:00:03 crc kubenswrapper[4716]: I1209 16:00:03.997757 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0588227ed3c7f807eab1b2adb8f16974884f80c3f10ed65ec6eb74389ebf1ff8" Dec 09 16:00:04 crc kubenswrapper[4716]: I1209 16:00:04.059321 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dtdsg" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="registry-server" probeResult="failure" output=< Dec 09 16:00:04 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 16:00:04 crc kubenswrapper[4716]: > Dec 09 16:00:04 crc kubenswrapper[4716]: I1209 16:00:04.506389 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj"] Dec 09 16:00:04 crc kubenswrapper[4716]: I1209 16:00:04.518607 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421555-9mxcj"] Dec 09 16:00:05 crc kubenswrapper[4716]: I1209 16:00:05.235253 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be35eec6-211e-4a8e-8af2-77699f4e5953" path="/var/lib/kubelet/pods/be35eec6-211e-4a8e-8af2-77699f4e5953/volumes" Dec 09 16:00:06 crc kubenswrapper[4716]: E1209 16:00:06.217460 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:00:13 crc kubenswrapper[4716]: I1209 16:00:13.071730 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 16:00:13 crc kubenswrapper[4716]: I1209 16:00:13.131212 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dtdsg" Dec 09 16:00:13 crc kubenswrapper[4716]: E1209 16:00:13.223592 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:00:13 crc kubenswrapper[4716]: I1209 16:00:13.584714 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"] Dec 09 16:00:13 crc kubenswrapper[4716]: I1209 16:00:13.670698 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 16:00:13 crc kubenswrapper[4716]: I1209 16:00:13.670992 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-brx57" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="registry-server" 
containerID="cri-o://1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32" gracePeriod=2 Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.273899 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.306963 4716 generic.go:334] "Generic (PLEG): container finished" podID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerID="1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32" exitCode=0 Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.308192 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-brx57" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.308724 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerDied","Data":"1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32"} Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.308755 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-brx57" event={"ID":"2fda88cc-680a-4a33-a9ea-547cf12e50d9","Type":"ContainerDied","Data":"70840a0aa54a94a9e9d1ed7c86b41150eb075d7eabf0349374dae35a350d0ec2"} Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.308793 4716 scope.go:117] "RemoveContainer" containerID="1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.352044 4716 scope.go:117] "RemoveContainer" containerID="ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.370838 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-catalog-content\") pod \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.371076 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-utilities\") pod \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.371192 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvndw\" (UniqueName: \"kubernetes.io/projected/2fda88cc-680a-4a33-a9ea-547cf12e50d9-kube-api-access-pvndw\") pod \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\" (UID: \"2fda88cc-680a-4a33-a9ea-547cf12e50d9\") " Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.371560 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-utilities" (OuterVolumeSpecName: "utilities") pod "2fda88cc-680a-4a33-a9ea-547cf12e50d9" (UID: "2fda88cc-680a-4a33-a9ea-547cf12e50d9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.372402 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.400019 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fda88cc-680a-4a33-a9ea-547cf12e50d9-kube-api-access-pvndw" (OuterVolumeSpecName: "kube-api-access-pvndw") pod "2fda88cc-680a-4a33-a9ea-547cf12e50d9" (UID: "2fda88cc-680a-4a33-a9ea-547cf12e50d9"). InnerVolumeSpecName "kube-api-access-pvndw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.415382 4716 scope.go:117] "RemoveContainer" containerID="ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.475331 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvndw\" (UniqueName: \"kubernetes.io/projected/2fda88cc-680a-4a33-a9ea-547cf12e50d9-kube-api-access-pvndw\") on node \"crc\" DevicePath \"\"" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.506581 4716 scope.go:117] "RemoveContainer" containerID="d24df05d7fab51af97ebe0ecb9f2e8437c0a5a04bc3596395bd64614a1b979cc" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.509256 4716 scope.go:117] "RemoveContainer" containerID="1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32" Dec 09 16:00:14 crc kubenswrapper[4716]: E1209 16:00:14.509760 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32\": container with ID starting with 1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32 not found: ID does not exist" containerID="1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.509792 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32"} err="failed to get container status \"1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32\": rpc error: code = NotFound desc = could not find container \"1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32\": container with ID starting with 1b4a542f250b35995cbad13fa30df218c098f88a713534bc2b68d5de22a84d32 not found: ID does not exist" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.509830 4716 scope.go:117] "RemoveContainer" containerID="ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c" Dec 09 16:00:14 crc kubenswrapper[4716]: E1209 16:00:14.510181 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c\": container with ID starting with ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c not found: ID does not exist" containerID="ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.510230 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c"} err="failed to get 
container status \"ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c\": rpc error: code = NotFound desc = could not find container \"ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c\": container with ID starting with ebed3b352756bc9e33b40b3d995e1dc664e8e3bd03e88019297120219ed34c1c not found: ID does not exist" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.510243 4716 scope.go:117] "RemoveContainer" containerID="ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a" Dec 09 16:00:14 crc kubenswrapper[4716]: E1209 16:00:14.510686 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a\": container with ID starting with ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a not found: ID does not exist" containerID="ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.510709 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a"} err="failed to get container status \"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a\": rpc error: code = NotFound desc = could not find container \"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a\": container with ID starting with ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a not found: ID does not exist" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.526249 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2fda88cc-680a-4a33-a9ea-547cf12e50d9" (UID: "2fda88cc-680a-4a33-a9ea-547cf12e50d9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.534455 4716 scope.go:117] "RemoveContainer" containerID="ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a" Dec 09 16:00:14 crc kubenswrapper[4716]: E1209 16:00:14.534965 4716 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a\": rpc error: code = NotFound desc = could not find container \"ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a\": container with ID starting with ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a not found: ID does not exist" containerID="ccf749ef658d028d6cc3c8542967f65f61ab7f298694f21a77f877620652234a" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.577997 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fda88cc-680a-4a33-a9ea-547cf12e50d9-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.903465 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 16:00:14 crc kubenswrapper[4716]: I1209 16:00:14.931207 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-brx57"] Dec 09 16:00:15 crc kubenswrapper[4716]: I1209 16:00:15.243968 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" path="/var/lib/kubelet/pods/2fda88cc-680a-4a33-a9ea-547cf12e50d9/volumes" Dec 09 16:00:17 crc kubenswrapper[4716]: I1209 16:00:17.922658 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:00:17 crc kubenswrapper[4716]: I1209 16:00:17.922735 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:00:19 crc kubenswrapper[4716]: E1209 16:00:19.216023 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:00:24 crc kubenswrapper[4716]: E1209 16:00:24.216883 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:00:33 crc kubenswrapper[4716]: E1209 16:00:33.227248 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:00:39 crc kubenswrapper[4716]: E1209 16:00:39.215644 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:00:47 crc kubenswrapper[4716]: E1209 16:00:47.216250 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:00:47 crc kubenswrapper[4716]: I1209 16:00:47.922630 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:00:47 crc kubenswrapper[4716]: I1209 16:00:47.922693 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:00:47 crc kubenswrapper[4716]: I1209 16:00:47.922743 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:00:47 crc kubenswrapper[4716]: I1209 16:00:47.923732 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:00:47 crc kubenswrapper[4716]: I1209 16:00:47.923793 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" gracePeriod=600 Dec 09 16:00:48 crc kubenswrapper[4716]: E1209 16:00:48.057973 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:00:49 crc kubenswrapper[4716]: I1209 16:00:49.037300 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" exitCode=0 Dec 09 16:00:49 crc kubenswrapper[4716]: I1209 16:00:49.037640 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"} Dec 09 16:00:49 crc kubenswrapper[4716]: I1209 16:00:49.037686 4716 scope.go:117] "RemoveContainer" containerID="c2891ea50ddc12e555d59873e9e677f4240628d25330d260c59e12bf97ac78c4" Dec 09 16:00:49 crc kubenswrapper[4716]: I1209 16:00:49.038555 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:00:49 crc kubenswrapper[4716]: E1209 16:00:49.038856 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:00:53 crc kubenswrapper[4716]: E1209 16:00:53.224721 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.155328 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29421601-2t7qc"] Dec 09 16:01:00 crc kubenswrapper[4716]: E1209 16:01:00.156409 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26e6b88c-3937-4dac-a165-9df99ab47f6b" containerName="collect-profiles" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.156425 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="26e6b88c-3937-4dac-a165-9df99ab47f6b" containerName="collect-profiles" Dec 09 16:01:00 crc kubenswrapper[4716]: E1209 16:01:00.156461 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="extract-utilities" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.156468 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="extract-utilities" Dec 09 16:01:00 crc kubenswrapper[4716]: E1209 16:01:00.156475 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="extract-content" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.156481 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="extract-content" Dec 09 16:01:00 crc kubenswrapper[4716]: E1209 16:01:00.156501 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="registry-server" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.156507 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="registry-server" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.156793 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fda88cc-680a-4a33-a9ea-547cf12e50d9" containerName="registry-server" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.156819 4716 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="26e6b88c-3937-4dac-a165-9df99ab47f6b" containerName="collect-profiles" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.157803 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.171521 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29421601-2t7qc"] Dec 09 16:01:00 crc kubenswrapper[4716]: E1209 16:01:00.216795 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.275438 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n624d\" (UniqueName: \"kubernetes.io/projected/5342520e-c878-41a3-8238-efd62938086a-kube-api-access-n624d\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.275846 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-fernet-keys\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.276245 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-config-data\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.276371 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-combined-ca-bundle\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.378717 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n624d\" (UniqueName: \"kubernetes.io/projected/5342520e-c878-41a3-8238-efd62938086a-kube-api-access-n624d\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.378905 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-fernet-keys\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.379249 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-config-data\") pod \"keystone-cron-29421601-2t7qc\" (UID: 
\"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.379326 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-combined-ca-bundle\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.387302 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-config-data\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.387544 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-combined-ca-bundle\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.389763 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-fernet-keys\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.402558 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n624d\" (UniqueName: \"kubernetes.io/projected/5342520e-c878-41a3-8238-efd62938086a-kube-api-access-n624d\") pod \"keystone-cron-29421601-2t7qc\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.508378 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:00 crc kubenswrapper[4716]: I1209 16:01:00.965308 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29421601-2t7qc"] Dec 09 16:01:01 crc kubenswrapper[4716]: I1209 16:01:01.205213 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421601-2t7qc" event={"ID":"5342520e-c878-41a3-8238-efd62938086a","Type":"ContainerStarted","Data":"22af5dfcb7007ef1d16c29249e0d7288d6a0054b3c5549692b21331361d519a2"} Dec 09 16:01:01 crc kubenswrapper[4716]: I1209 16:01:01.206325 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421601-2t7qc" event={"ID":"5342520e-c878-41a3-8238-efd62938086a","Type":"ContainerStarted","Data":"7eee0c361edcbd4472a73c69a6d96ff6fcd5812303583d4dcb85a5b9e0d7568d"} Dec 09 16:01:01 crc kubenswrapper[4716]: I1209 16:01:01.229334 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29421601-2t7qc" podStartSLOduration=1.229296031 podStartE2EDuration="1.229296031s" podCreationTimestamp="2025-12-09 16:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-09 16:01:01.222104698 +0000 UTC m=+3148.376848696" watchObservedRunningTime="2025-12-09 16:01:01.229296031 +0000 UTC m=+3148.384040019" Dec 09 16:01:03 crc kubenswrapper[4716]: I1209 16:01:03.229639 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:01:03 crc kubenswrapper[4716]: E1209 16:01:03.230698 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:01:04 crc kubenswrapper[4716]: I1209 16:01:04.237287 4716 generic.go:334] "Generic (PLEG): container finished" podID="5342520e-c878-41a3-8238-efd62938086a" containerID="22af5dfcb7007ef1d16c29249e0d7288d6a0054b3c5549692b21331361d519a2" exitCode=0 Dec 09 16:01:04 crc kubenswrapper[4716]: I1209 16:01:04.237344 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421601-2t7qc" event={"ID":"5342520e-c878-41a3-8238-efd62938086a","Type":"ContainerDied","Data":"22af5dfcb7007ef1d16c29249e0d7288d6a0054b3c5549692b21331361d519a2"} Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.635725 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.824892 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-fernet-keys\") pod \"5342520e-c878-41a3-8238-efd62938086a\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.825116 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n624d\" (UniqueName: \"kubernetes.io/projected/5342520e-c878-41a3-8238-efd62938086a-kube-api-access-n624d\") pod \"5342520e-c878-41a3-8238-efd62938086a\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.825349 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-config-data\") pod \"5342520e-c878-41a3-8238-efd62938086a\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.825581 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-combined-ca-bundle\") pod \"5342520e-c878-41a3-8238-efd62938086a\" (UID: \"5342520e-c878-41a3-8238-efd62938086a\") " Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.833634 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5342520e-c878-41a3-8238-efd62938086a" (UID: "5342520e-c878-41a3-8238-efd62938086a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.834953 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5342520e-c878-41a3-8238-efd62938086a-kube-api-access-n624d" (OuterVolumeSpecName: "kube-api-access-n624d") pod "5342520e-c878-41a3-8238-efd62938086a" (UID: "5342520e-c878-41a3-8238-efd62938086a"). InnerVolumeSpecName "kube-api-access-n624d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.865044 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5342520e-c878-41a3-8238-efd62938086a" (UID: "5342520e-c878-41a3-8238-efd62938086a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.891570 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-config-data" (OuterVolumeSpecName: "config-data") pod "5342520e-c878-41a3-8238-efd62938086a" (UID: "5342520e-c878-41a3-8238-efd62938086a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.929501 4716 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.929550 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n624d\" (UniqueName: \"kubernetes.io/projected/5342520e-c878-41a3-8238-efd62938086a-kube-api-access-n624d\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.929566 4716 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-config-data\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:05 crc kubenswrapper[4716]: I1209 16:01:05.929577 4716 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5342520e-c878-41a3-8238-efd62938086a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:06 crc kubenswrapper[4716]: E1209 16:01:06.217043 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:01:06 crc kubenswrapper[4716]: I1209 16:01:06.256517 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29421601-2t7qc" event={"ID":"5342520e-c878-41a3-8238-efd62938086a","Type":"ContainerDied","Data":"7eee0c361edcbd4472a73c69a6d96ff6fcd5812303583d4dcb85a5b9e0d7568d"} Dec 09 16:01:06 crc kubenswrapper[4716]: I1209 16:01:06.256856 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7eee0c361edcbd4472a73c69a6d96ff6fcd5812303583d4dcb85a5b9e0d7568d" Dec 09 16:01:06 crc kubenswrapper[4716]: I1209 16:01:06.256583 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29421601-2t7qc" Dec 09 16:01:14 crc kubenswrapper[4716]: I1209 16:01:14.215017 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:01:14 crc kubenswrapper[4716]: E1209 16:01:14.215940 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:01:15 crc kubenswrapper[4716]: E1209 16:01:15.217111 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:01:18 crc kubenswrapper[4716]: E1209 16:01:18.216107 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:01:26 crc kubenswrapper[4716]: I1209 16:01:26.214151 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:01:26 crc kubenswrapper[4716]: E1209 16:01:26.215054 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:01:27 crc kubenswrapper[4716]: E1209 16:01:27.218820 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.420308 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mzqjc"] Dec 09 16:01:31 crc kubenswrapper[4716]: E1209 16:01:31.421379 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5342520e-c878-41a3-8238-efd62938086a" containerName="keystone-cron" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.421393 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="5342520e-c878-41a3-8238-efd62938086a" containerName="keystone-cron" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.421616 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="5342520e-c878-41a3-8238-efd62938086a" containerName="keystone-cron" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.423493 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.445757 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mzqjc"] Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.576142 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb6nc\" (UniqueName: \"kubernetes.io/projected/2fcb9831-0482-45b4-96c1-4d289a335005-kube-api-access-nb6nc\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.576869 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-utilities\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.576911 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-catalog-content\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.678902 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-utilities\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.678954 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-catalog-content\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.679069 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb6nc\" (UniqueName: \"kubernetes.io/projected/2fcb9831-0482-45b4-96c1-4d289a335005-kube-api-access-nb6nc\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.679455 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-utilities\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.679617 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-catalog-content\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.701404 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nb6nc\" (UniqueName: \"kubernetes.io/projected/2fcb9831-0482-45b4-96c1-4d289a335005-kube-api-access-nb6nc\") pod \"community-operators-mzqjc\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:31 crc kubenswrapper[4716]: I1209 16:01:31.793243 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:32 crc kubenswrapper[4716]: I1209 16:01:32.318913 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mzqjc"] Dec 09 16:01:32 crc kubenswrapper[4716]: W1209 16:01:32.325198 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fcb9831_0482_45b4_96c1_4d289a335005.slice/crio-b856fae6cfce12a72a2300764025f581f77fbe3edf312f21bf6dc278a50245bd WatchSource:0}: Error finding container b856fae6cfce12a72a2300764025f581f77fbe3edf312f21bf6dc278a50245bd: Status 404 returned error can't find the container with id b856fae6cfce12a72a2300764025f581f77fbe3edf312f21bf6dc278a50245bd Dec 09 16:01:32 crc kubenswrapper[4716]: I1209 16:01:32.530261 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerStarted","Data":"b856fae6cfce12a72a2300764025f581f77fbe3edf312f21bf6dc278a50245bd"} Dec 09 16:01:33 crc kubenswrapper[4716]: E1209 16:01:33.226454 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:01:33 crc kubenswrapper[4716]: I1209 16:01:33.543490 4716 generic.go:334] "Generic (PLEG): container finished" podID="2fcb9831-0482-45b4-96c1-4d289a335005" containerID="a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395" exitCode=0 Dec 09 16:01:33 crc kubenswrapper[4716]: I1209 16:01:33.543566 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerDied","Data":"a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395"} Dec 09 16:01:34 crc kubenswrapper[4716]: I1209 16:01:34.556058 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerStarted","Data":"32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de"} Dec 09 16:01:36 crc kubenswrapper[4716]: I1209 16:01:36.580880 4716 generic.go:334] "Generic (PLEG): container finished" podID="2fcb9831-0482-45b4-96c1-4d289a335005" containerID="32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de" exitCode=0 Dec 09 16:01:36 crc kubenswrapper[4716]: I1209 16:01:36.580981 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerDied","Data":"32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de"} Dec 09 16:01:38 crc kubenswrapper[4716]: E1209 16:01:38.217127 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:01:38 crc kubenswrapper[4716]: I1209 16:01:38.601901 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerStarted","Data":"e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3"} Dec 09 16:01:38 crc kubenswrapper[4716]: I1209 16:01:38.626272 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mzqjc" podStartSLOduration=2.99336938 podStartE2EDuration="7.626248908s" podCreationTimestamp="2025-12-09 16:01:31 +0000 UTC" firstStartedPulling="2025-12-09 16:01:33.545919575 +0000 UTC m=+3180.700663563" lastFinishedPulling="2025-12-09 16:01:38.178799103 +0000 UTC m=+3185.333543091" observedRunningTime="2025-12-09 16:01:38.61639614 +0000 UTC m=+3185.771140148" watchObservedRunningTime="2025-12-09 16:01:38.626248908 +0000 UTC m=+3185.780992896" Dec 09 16:01:41 crc kubenswrapper[4716]: I1209 16:01:41.214935 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:01:41 crc kubenswrapper[4716]: E1209 16:01:41.216175 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:01:41 crc kubenswrapper[4716]: I1209 16:01:41.793543 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:41 crc kubenswrapper[4716]: I1209 16:01:41.793598 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:41 crc kubenswrapper[4716]: I1209 16:01:41.870998 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:47 crc kubenswrapper[4716]: E1209 16:01:47.216519 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:01:51 crc kubenswrapper[4716]: I1209 16:01:51.848321 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:51 crc kubenswrapper[4716]: I1209 16:01:51.906664 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mzqjc"] Dec 09 16:01:52 crc kubenswrapper[4716]: E1209 16:01:52.216437 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:01:52 crc kubenswrapper[4716]: I1209 16:01:52.737035 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mzqjc" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="registry-server" containerID="cri-o://e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3" gracePeriod=2 Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.244725 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.411851 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-catalog-content\") pod \"2fcb9831-0482-45b4-96c1-4d289a335005\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.411973 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-utilities\") pod \"2fcb9831-0482-45b4-96c1-4d289a335005\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.412056 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb6nc\" (UniqueName: \"kubernetes.io/projected/2fcb9831-0482-45b4-96c1-4d289a335005-kube-api-access-nb6nc\") pod \"2fcb9831-0482-45b4-96c1-4d289a335005\" (UID: \"2fcb9831-0482-45b4-96c1-4d289a335005\") " Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.412765 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-utilities" (OuterVolumeSpecName: "utilities") pod "2fcb9831-0482-45b4-96c1-4d289a335005" (UID: "2fcb9831-0482-45b4-96c1-4d289a335005"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.426091 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fcb9831-0482-45b4-96c1-4d289a335005-kube-api-access-nb6nc" (OuterVolumeSpecName: "kube-api-access-nb6nc") pod "2fcb9831-0482-45b4-96c1-4d289a335005" (UID: "2fcb9831-0482-45b4-96c1-4d289a335005"). InnerVolumeSpecName "kube-api-access-nb6nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.463974 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2fcb9831-0482-45b4-96c1-4d289a335005" (UID: "2fcb9831-0482-45b4-96c1-4d289a335005"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.515578 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.515635 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fcb9831-0482-45b4-96c1-4d289a335005-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.515649 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb6nc\" (UniqueName: \"kubernetes.io/projected/2fcb9831-0482-45b4-96c1-4d289a335005-kube-api-access-nb6nc\") on node \"crc\" DevicePath \"\"" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.751690 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mzqjc" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.751677 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerDied","Data":"e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3"} Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.751613 4716 generic.go:334] "Generic (PLEG): container finished" podID="2fcb9831-0482-45b4-96c1-4d289a335005" containerID="e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3" exitCode=0 Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.752137 4716 scope.go:117] "RemoveContainer" containerID="e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.752168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzqjc" event={"ID":"2fcb9831-0482-45b4-96c1-4d289a335005","Type":"ContainerDied","Data":"b856fae6cfce12a72a2300764025f581f77fbe3edf312f21bf6dc278a50245bd"} Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.793357 4716 scope.go:117] "RemoveContainer" containerID="32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.807877 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mzqjc"] Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.816442 4716 scope.go:117] "RemoveContainer" containerID="a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.819908 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mzqjc"] Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.885096 4716 scope.go:117] "RemoveContainer" containerID="e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3" Dec 09 16:01:53 crc kubenswrapper[4716]: E1209 16:01:53.887907 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3\": container with ID starting with e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3 not found: ID does not exist" containerID="e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.887982 
4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3"} err="failed to get container status \"e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3\": rpc error: code = NotFound desc = could not find container \"e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3\": container with ID starting with e5e2776e92a69d8c271253cad1c41ef9fcdd77f7e2cdc54293abd274a0973df3 not found: ID does not exist" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.888024 4716 scope.go:117] "RemoveContainer" containerID="32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de" Dec 09 16:01:53 crc kubenswrapper[4716]: E1209 16:01:53.888420 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de\": container with ID starting with 32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de not found: ID does not exist" containerID="32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.888471 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de"} err="failed to get container status \"32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de\": rpc error: code = NotFound desc = could not find container \"32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de\": container with ID starting with 32a836c0c482a2211854ffe41d95ae9b757ce09debaccca3adf9d671e41476de not found: ID does not exist" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.888500 4716 scope.go:117] "RemoveContainer" containerID="a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395" Dec 09 16:01:53 crc kubenswrapper[4716]: E1209 16:01:53.888944 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395\": container with ID starting with a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395 not found: ID does not exist" containerID="a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395" Dec 09 16:01:53 crc kubenswrapper[4716]: I1209 16:01:53.888979 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395"} err="failed to get container status \"a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395\": rpc error: code = NotFound desc = could not find container \"a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395\": container with ID starting with a7d45bd75054301add0e7099bdc884d60c1019ac1fcd021ae67183c825016395 not found: ID does not exist" Dec 09 16:01:55 crc kubenswrapper[4716]: I1209 16:01:55.226785 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" path="/var/lib/kubelet/pods/2fcb9831-0482-45b4-96c1-4d289a335005/volumes" Dec 09 16:01:56 crc kubenswrapper[4716]: I1209 16:01:56.213822 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:01:56 crc kubenswrapper[4716]: E1209 16:01:56.214525 4716 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:01:58 crc kubenswrapper[4716]: E1209 16:01:58.216134 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:02:03 crc kubenswrapper[4716]: E1209 16:02:03.224548 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:02:09 crc kubenswrapper[4716]: I1209 16:02:09.214666 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:02:09 crc kubenswrapper[4716]: E1209 16:02:09.215504 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:02:13 crc kubenswrapper[4716]: E1209 16:02:13.226539 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:02:14 crc kubenswrapper[4716]: E1209 16:02:14.216676 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:02:24 crc kubenswrapper[4716]: I1209 16:02:24.214782 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:02:24 crc kubenswrapper[4716]: E1209 16:02:24.215690 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:02:25 crc kubenswrapper[4716]: E1209 16:02:25.216519 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:02:27 crc kubenswrapper[4716]: I1209 16:02:27.083296 4716 generic.go:334] "Generic (PLEG): container finished" podID="9b993fbb-2885-48c8-83b7-9e07287e790e" containerID="d4c711377723caac7497e0dd7c8e056dc04c1a0ba08a02b3720c3f72977f0877" exitCode=2 Dec 09 16:02:27 crc kubenswrapper[4716]: I1209 16:02:27.083384 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l" event={"ID":"9b993fbb-2885-48c8-83b7-9e07287e790e","Type":"ContainerDied","Data":"d4c711377723caac7497e0dd7c8e056dc04c1a0ba08a02b3720c3f72977f0877"} Dec 09 16:02:27 crc kubenswrapper[4716]: E1209 16:02:27.218182 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.692883 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.824728 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-ssh-key\") pod \"9b993fbb-2885-48c8-83b7-9e07287e790e\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.825172 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-inventory\") pod \"9b993fbb-2885-48c8-83b7-9e07287e790e\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.825258 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssvzx\" (UniqueName: \"kubernetes.io/projected/9b993fbb-2885-48c8-83b7-9e07287e790e-kube-api-access-ssvzx\") pod \"9b993fbb-2885-48c8-83b7-9e07287e790e\" (UID: \"9b993fbb-2885-48c8-83b7-9e07287e790e\") " Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.831668 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b993fbb-2885-48c8-83b7-9e07287e790e-kube-api-access-ssvzx" (OuterVolumeSpecName: "kube-api-access-ssvzx") pod "9b993fbb-2885-48c8-83b7-9e07287e790e" (UID: "9b993fbb-2885-48c8-83b7-9e07287e790e"). InnerVolumeSpecName "kube-api-access-ssvzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.861387 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-inventory" (OuterVolumeSpecName: "inventory") pod "9b993fbb-2885-48c8-83b7-9e07287e790e" (UID: "9b993fbb-2885-48c8-83b7-9e07287e790e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.865096 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9b993fbb-2885-48c8-83b7-9e07287e790e" (UID: "9b993fbb-2885-48c8-83b7-9e07287e790e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.928062 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.928101 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b993fbb-2885-48c8-83b7-9e07287e790e-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 16:02:28 crc kubenswrapper[4716]: I1209 16:02:28.928114 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssvzx\" (UniqueName: \"kubernetes.io/projected/9b993fbb-2885-48c8-83b7-9e07287e790e-kube-api-access-ssvzx\") on node \"crc\" DevicePath \"\"" Dec 09 16:02:29 crc kubenswrapper[4716]: I1209 16:02:29.113114 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l" event={"ID":"9b993fbb-2885-48c8-83b7-9e07287e790e","Type":"ContainerDied","Data":"0b0adf113e85b29d93e776f4e5e992b33c51d97fab24b6be860a95bdb4e9c10c"} Dec 09 16:02:29 crc kubenswrapper[4716]: I1209 16:02:29.113161 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b0adf113e85b29d93e776f4e5e992b33c51d97fab24b6be860a95bdb4e9c10c" Dec 09 16:02:29 crc kubenswrapper[4716]: I1209 16:02:29.113186 4716 util.go:48] "No ready sandbox for pod can be found. 
Dec 09 16:02:29 crc kubenswrapper[4716]: I1209 16:02:29.113186 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l"
Dec 09 16:02:37 crc kubenswrapper[4716]: I1209 16:02:37.214289 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:02:37 crc kubenswrapper[4716]: E1209 16:02:37.215044 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:02:40 crc kubenswrapper[4716]: E1209 16:02:40.215949 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:02:41 crc kubenswrapper[4716]: E1209 16:02:41.215783 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:02:50 crc kubenswrapper[4716]: I1209 16:02:50.213358 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:02:50 crc kubenswrapper[4716]: E1209 16:02:50.214212 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:02:53 crc kubenswrapper[4716]: E1209 16:02:53.226026 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:02:54 crc kubenswrapper[4716]: E1209 16:02:54.217084 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:03:03 crc kubenswrapper[4716]: I1209 16:03:03.240022 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:03:03 crc kubenswrapper[4716]: E1209 16:03:03.241147 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.069695 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"]
Dec 09 16:03:06 crc kubenswrapper[4716]: E1209 16:03:06.070902 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="registry-server"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.070921 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="registry-server"
Dec 09 16:03:06 crc kubenswrapper[4716]: E1209 16:03:06.070976 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="extract-utilities"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.070989 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="extract-utilities"
Dec 09 16:03:06 crc kubenswrapper[4716]: E1209 16:03:06.071001 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="extract-content"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.071010 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="extract-content"
Dec 09 16:03:06 crc kubenswrapper[4716]: E1209 16:03:06.071038 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b993fbb-2885-48c8-83b7-9e07287e790e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.071051 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b993fbb-2885-48c8-83b7-9e07287e790e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.071334 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fcb9831-0482-45b4-96c1-4d289a335005" containerName="registry-server"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.071379 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b993fbb-2885-48c8-83b7-9e07287e790e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
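The RemoveStaleState / "Deleted CPUSet assignment" lines above show the CPU and memory managers sweeping per-container state for pods that are no longer active before admitting the new download-cache pod. A minimal sketch of such a sweep, under the assumption that state is a map keyed by (podUID, container); the key type and map layout here are illustrative, not the managers' real data structures:

// A minimal sketch (not the cpu_manager/memory_manager code) of the
// stale-state sweep logged above: any (podUID, container) assignment whose
// pod is no longer active gets dropped.
package main

import "fmt"

type key struct{ podUID, container string }

func removeStaleState(assignments map[key]string, active map[string]bool) {
	for k := range assignments {
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"2fcb9831-0482-45b4-96c1-4d289a335005", "registry-server"}: "cpuset 0-1",
	}
	active := map[string]bool{"b22ffe51-8b47-48a6-9292-0577f680d56b": true}
	removeStaleState(assignments, active)
}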
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.072557 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.074948 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.075263 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.075399 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.076437 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.102094 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"]
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.202007 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.202280 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntr22\" (UniqueName: \"kubernetes.io/projected/b22ffe51-8b47-48a6-9292-0577f680d56b-kube-api-access-ntr22\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.202566 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: E1209 16:03:06.217152 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.304946 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.305132 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntr22\" (UniqueName: \"kubernetes.io/projected/b22ffe51-8b47-48a6-9292-0577f680d56b-kube-api-access-ntr22\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.305274 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.313342 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.325483 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.360636 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntr22\" (UniqueName: \"kubernetes.io/projected/b22ffe51-8b47-48a6-9292-0577f680d56b-kube-api-access-ntr22\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:06 crc kubenswrapper[4716]: I1209 16:03:06.404257 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"
Dec 09 16:03:07 crc kubenswrapper[4716]: I1209 16:03:07.180911 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2"]
Dec 09 16:03:07 crc kubenswrapper[4716]: E1209 16:03:07.215432 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:03:07 crc kubenswrapper[4716]: I1209 16:03:07.557788 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" event={"ID":"b22ffe51-8b47-48a6-9292-0577f680d56b","Type":"ContainerStarted","Data":"f370c19ca1339ee2863ea390568e9c2f6117195dcecb2742cdc96cdd85eff0b1"}
Dec 09 16:03:08 crc kubenswrapper[4716]: I1209 16:03:08.571364 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" event={"ID":"b22ffe51-8b47-48a6-9292-0577f680d56b","Type":"ContainerStarted","Data":"a6bfd048774051b98031e0b06ba774da6aa9d638a7e0c3a77eaa34184ba90dc5"}
Dec 09 16:03:08 crc kubenswrapper[4716]: I1209 16:03:08.603002 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" podStartSLOduration=1.982860843 podStartE2EDuration="2.602961601s" podCreationTimestamp="2025-12-09 16:03:06 +0000 UTC" firstStartedPulling="2025-12-09 16:03:07.179487843 +0000 UTC m=+3274.334231821" lastFinishedPulling="2025-12-09 16:03:07.799588591 +0000 UTC m=+3274.954332579" observedRunningTime="2025-12-09 16:03:08.597512678 +0000 UTC m=+3275.752256666" watchObservedRunningTime="2025-12-09 16:03:08.602961601 +0000 UTC m=+3275.757705589"
Dec 09 16:03:14 crc kubenswrapper[4716]: I1209 16:03:14.213940 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:03:14 crc kubenswrapper[4716]: E1209 16:03:14.214794 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:03:19 crc kubenswrapper[4716]: E1209 16:03:19.216445 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:03:20 crc kubenswrapper[4716]: E1209 16:03:20.216350 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
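The pod_startup_latency_tracker line above records two durations for the kp8f2 pod: podStartE2EDuration (creation to observed running, 2.602961601s) and podStartSLOduration (the same window minus time spent pulling images). The numbers in the log check out if the pull interval is taken on the monotonic clock (the m=+... offsets); a small Go reproduction of that arithmetic, using only values copied from the log line:

// Reproducing the arithmetic of the pod_startup_latency_tracker line above:
// SLO duration = E2E duration minus time spent pulling images, with the
// pull interval computed from the monotonic-clock (m=+...) offsets.
package main

import "fmt"

func main() {
	const (
		e2e          = 2.602961601    // podStartE2EDuration, seconds
		startPulling = 3274.334231821 // firstStartedPulling, m=+ offset
		donePulling  = 3274.954332579 // lastFinishedPulling, m=+ offset
	)
	pull := donePulling - startPulling // 0.620100758s spent pulling
	fmt.Printf("podStartSLOduration = %.9f\n", e2e-pull) // ≈ 1.982860843, as logged
}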
Dec 09 16:03:28 crc kubenswrapper[4716]: I1209 16:03:28.215080 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:03:28 crc kubenswrapper[4716]: E1209 16:03:28.216434 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:03:32 crc kubenswrapper[4716]: I1209 16:03:32.217595 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 16:03:32 crc kubenswrapper[4716]: E1209 16:03:32.313388 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:03:32 crc kubenswrapper[4716]: E1209 16:03:32.313479 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:03:32 crc kubenswrapper[4716]: E1209 16:03:32.313841 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:03:32 crc kubenswrapper[4716]: E1209 16:03:32.315002 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
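The pull failure above is the root cause of the heat-db-sync ImagePullBackOff cycle: the registry reports that the tag itself was deleted or expired, so the pull fails deterministically (ErrImagePull) and kubelet then backs off (ImagePullBackOff) between attempts. A naive Go illustration of how the failing reference decomposes; this is a deliberate simplification that only handles the registry/namespace/name:tag shape seen here, not the full OCI reference grammar a real parser (e.g. containers/image) implements:

// Naive decomposition of the image reference failing in the log above.
// Illustration only: real reference parsing should use a proper library.
package main

import (
	"fmt"
	"strings"
)

func main() {
	ref := "quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
	name, tag, _ := strings.Cut(ref, ":")     // split off the tag
	registry, repo, _ := strings.Cut(name, "/") // first path segment is the registry host
	fmt.Println("registry:  ", registry) // quay.rdoproject.org
	fmt.Println("repository:", repo)     // podified-master-centos10/openstack-heat-engine
	fmt.Println("tag:       ", tag)      // current-tested, deleted upstream per the log
}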
Dec 09 16:03:34 crc kubenswrapper[4716]: E1209 16:03:34.335964 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:03:34 crc kubenswrapper[4716]: E1209 16:03:34.336553 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:03:34 crc kubenswrapper[4716]: E1209 16:03:34.336995 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:03:34 crc kubenswrapper[4716]: E1209 16:03:34.338697 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:03:39 crc kubenswrapper[4716]: I1209 16:03:39.214181 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:03:39 crc kubenswrapper[4716]: E1209 16:03:39.215015 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:03:46 crc kubenswrapper[4716]: E1209 16:03:46.216026 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:03:46 crc kubenswrapper[4716]: E1209 16:03:46.216087 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:03:54 crc kubenswrapper[4716]: I1209 16:03:54.343031 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610"
Dec 09 16:03:54 crc kubenswrapper[4716]: E1209 16:03:54.343904 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:03:58 crc kubenswrapper[4716]: E1209 16:03:58.552403 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:04:07 crc kubenswrapper[4716]: I1209 16:04:07.214468 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:04:07 crc kubenswrapper[4716]: E1209 16:04:07.215421 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:04:11 crc kubenswrapper[4716]: E1209 16:04:11.233389 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:04:12 crc kubenswrapper[4716]: E1209 16:04:12.217816 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:04:19 crc kubenswrapper[4716]: I1209 16:04:19.214606 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:04:19 crc kubenswrapper[4716]: E1209 16:04:19.216192 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:04:23 crc kubenswrapper[4716]: E1209 16:04:23.224466 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:04:25 crc kubenswrapper[4716]: E1209 16:04:25.215646 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:04:32 crc kubenswrapper[4716]: I1209 16:04:32.214172 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:04:32 crc kubenswrapper[4716]: E1209 16:04:32.215770 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:04:35 crc kubenswrapper[4716]: E1209 16:04:35.228448 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:04:38 crc kubenswrapper[4716]: E1209 16:04:38.216656 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:04:45 crc kubenswrapper[4716]: I1209 16:04:45.213431 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:04:45 crc kubenswrapper[4716]: E1209 16:04:45.214212 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:04:46 crc kubenswrapper[4716]: E1209 16:04:46.217418 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:04:51 crc kubenswrapper[4716]: E1209 16:04:51.217049 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:04:56 crc kubenswrapper[4716]: I1209 16:04:56.214324 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:04:56 crc kubenswrapper[4716]: E1209 16:04:56.215381 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:04:59 crc kubenswrapper[4716]: E1209 16:04:59.216191 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:05:02 crc kubenswrapper[4716]: E1209 16:05:02.216982 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:05:08 crc kubenswrapper[4716]: I1209 16:05:08.213911 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:05:08 crc kubenswrapper[4716]: E1209 16:05:08.215552 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:05:14 crc kubenswrapper[4716]: E1209 16:05:14.216598 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:05:17 crc kubenswrapper[4716]: E1209 16:05:17.216705 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:05:23 crc kubenswrapper[4716]: I1209 16:05:23.221396 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:05:23 crc kubenswrapper[4716]: E1209 16:05:23.222282 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:05:26 crc kubenswrapper[4716]: E1209 16:05:26.216267 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:05:28 crc kubenswrapper[4716]: E1209 16:05:28.215854 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:05:37 crc kubenswrapper[4716]: I1209 16:05:37.215530 4716 scope.go:117] "RemoveContainer" 
containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:05:37 crc kubenswrapper[4716]: E1209 16:05:37.216308 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:05:40 crc kubenswrapper[4716]: E1209 16:05:40.227975 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:05:41 crc kubenswrapper[4716]: E1209 16:05:41.216123 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:05:51 crc kubenswrapper[4716]: I1209 16:05:51.214646 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:05:51 crc kubenswrapper[4716]: I1209 16:05:51.476008 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"a72cb3bf39c22c5582c4f970310212339f80ef4517855fa043155dba581a91f1"} Dec 09 16:05:52 crc kubenswrapper[4716]: E1209 16:05:52.216756 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:05:55 crc kubenswrapper[4716]: E1209 16:05:55.216666 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:06:05 crc kubenswrapper[4716]: E1209 16:06:05.218282 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:06:06 crc kubenswrapper[4716]: E1209 16:06:06.218058 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:06:18 crc kubenswrapper[4716]: E1209 
Dec 09 16:06:18 crc kubenswrapper[4716]: E1209 16:06:18.215792 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:06:20 crc kubenswrapper[4716]: E1209 16:06:20.216296 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:06:29 crc kubenswrapper[4716]: E1209 16:06:29.217308 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:06:31 crc kubenswrapper[4716]: E1209 16:06:31.220430 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:06:41 crc kubenswrapper[4716]: E1209 16:06:41.216847 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:06:46 crc kubenswrapper[4716]: E1209 16:06:46.216408 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:06:56 crc kubenswrapper[4716]: E1209 16:06:56.217750 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:07:00 crc kubenswrapper[4716]: E1209 16:07:00.216186 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:07:07 crc kubenswrapper[4716]: E1209 16:07:07.218146 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:07:12 crc kubenswrapper[4716]: E1209 16:07:12.216255 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:07:19 crc kubenswrapper[4716]: E1209 16:07:19.215811 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:07:26 crc kubenswrapper[4716]: E1209 16:07:26.216149 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:07:34 crc kubenswrapper[4716]: E1209 16:07:34.216411 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:07:41 crc kubenswrapper[4716]: E1209 16:07:41.216329 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:07:46 crc kubenswrapper[4716]: E1209 16:07:46.216109 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:07:52 crc kubenswrapper[4716]: E1209 16:07:52.216905 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:08:01 crc kubenswrapper[4716]: E1209 16:08:01.216754 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:08:07 crc kubenswrapper[4716]: E1209 16:08:07.216223 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:08:12 crc kubenswrapper[4716]: E1209 16:08:12.215242 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:08:17 crc kubenswrapper[4716]: I1209 16:08:17.922176 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 16:08:17 crc kubenswrapper[4716]: I1209 16:08:17.922840 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 16:08:22 crc kubenswrapper[4716]: E1209 16:08:22.215521 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:08:26 crc kubenswrapper[4716]: E1209 16:08:26.216160 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:08:35 crc kubenswrapper[4716]: I1209 16:08:35.217001 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 16:08:35 crc kubenswrapper[4716]: E1209 16:08:35.351829 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:08:35 crc kubenswrapper[4716]: E1209 16:08:35.352130 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:08:35 crc kubenswrapper[4716]: E1209 16:08:35.353359 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:08:38 crc kubenswrapper[4716]: E1209 16:08:38.331394 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:08:38 crc kubenswrapper[4716]: E1209 16:08:38.332010 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:08:38 crc kubenswrapper[4716]: E1209 16:08:38.332427 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:08:38 crc kubenswrapper[4716]: E1209 16:08:38.334292 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:08:47 crc kubenswrapper[4716]: E1209 16:08:47.216551 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:08:47 crc kubenswrapper[4716]: I1209 16:08:47.922732 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:08:47 crc kubenswrapper[4716]: I1209 16:08:47.922809 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:08:50 crc kubenswrapper[4716]: E1209 16:08:50.217539 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:09:00 crc kubenswrapper[4716]: E1209 16:09:00.216432 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:09:04 crc kubenswrapper[4716]: E1209 16:09:04.215794 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:09:13 crc kubenswrapper[4716]: E1209 16:09:13.222815 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:09:15 crc kubenswrapper[4716]: E1209 16:09:15.217472 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:09:17 crc kubenswrapper[4716]: I1209 16:09:17.922028 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:09:17 crc kubenswrapper[4716]: I1209 16:09:17.924289 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:09:17 crc kubenswrapper[4716]: I1209 16:09:17.924550 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:09:17 crc kubenswrapper[4716]: I1209 16:09:17.925744 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a72cb3bf39c22c5582c4f970310212339f80ef4517855fa043155dba581a91f1"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:09:17 crc kubenswrapper[4716]: I1209 16:09:17.925979 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://a72cb3bf39c22c5582c4f970310212339f80ef4517855fa043155dba581a91f1" gracePeriod=600 Dec 09 16:09:18 crc kubenswrapper[4716]: I1209 16:09:18.712391 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="a72cb3bf39c22c5582c4f970310212339f80ef4517855fa043155dba581a91f1" exitCode=0 Dec 09 16:09:18 crc kubenswrapper[4716]: I1209 16:09:18.712562 
4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"a72cb3bf39c22c5582c4f970310212339f80ef4517855fa043155dba581a91f1"} Dec 09 16:09:18 crc kubenswrapper[4716]: I1209 16:09:18.713210 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"} Dec 09 16:09:18 crc kubenswrapper[4716]: I1209 16:09:18.713299 4716 scope.go:117] "RemoveContainer" containerID="64726c109e2072386d53708377a845812b6c09bfc8936cc323633819d8bc1610" Dec 09 16:09:28 crc kubenswrapper[4716]: E1209 16:09:28.216137 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:09:28 crc kubenswrapper[4716]: E1209 16:09:28.216848 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:09:30 crc kubenswrapper[4716]: I1209 16:09:30.855567 4716 generic.go:334] "Generic (PLEG): container finished" podID="b22ffe51-8b47-48a6-9292-0577f680d56b" containerID="a6bfd048774051b98031e0b06ba774da6aa9d638a7e0c3a77eaa34184ba90dc5" exitCode=2 Dec 09 16:09:30 crc kubenswrapper[4716]: I1209 16:09:30.855662 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" event={"ID":"b22ffe51-8b47-48a6-9292-0577f680d56b","Type":"ContainerDied","Data":"a6bfd048774051b98031e0b06ba774da6aa9d638a7e0c3a77eaa34184ba90dc5"} Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.344789 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.460562 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntr22\" (UniqueName: \"kubernetes.io/projected/b22ffe51-8b47-48a6-9292-0577f680d56b-kube-api-access-ntr22\") pod \"b22ffe51-8b47-48a6-9292-0577f680d56b\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.461236 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-inventory\") pod \"b22ffe51-8b47-48a6-9292-0577f680d56b\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.461441 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-ssh-key\") pod \"b22ffe51-8b47-48a6-9292-0577f680d56b\" (UID: \"b22ffe51-8b47-48a6-9292-0577f680d56b\") " Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.470446 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b22ffe51-8b47-48a6-9292-0577f680d56b-kube-api-access-ntr22" (OuterVolumeSpecName: "kube-api-access-ntr22") pod "b22ffe51-8b47-48a6-9292-0577f680d56b" (UID: "b22ffe51-8b47-48a6-9292-0577f680d56b"). InnerVolumeSpecName "kube-api-access-ntr22". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.499918 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-inventory" (OuterVolumeSpecName: "inventory") pod "b22ffe51-8b47-48a6-9292-0577f680d56b" (UID: "b22ffe51-8b47-48a6-9292-0577f680d56b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.501257 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b22ffe51-8b47-48a6-9292-0577f680d56b" (UID: "b22ffe51-8b47-48a6-9292-0577f680d56b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.564606 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.564669 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntr22\" (UniqueName: \"kubernetes.io/projected/b22ffe51-8b47-48a6-9292-0577f680d56b-kube-api-access-ntr22\") on node \"crc\" DevicePath \"\"" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.564689 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b22ffe51-8b47-48a6-9292-0577f680d56b-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.877165 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" event={"ID":"b22ffe51-8b47-48a6-9292-0577f680d56b","Type":"ContainerDied","Data":"f370c19ca1339ee2863ea390568e9c2f6117195dcecb2742cdc96cdd85eff0b1"} Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.877533 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f370c19ca1339ee2863ea390568e9c2f6117195dcecb2742cdc96cdd85eff0b1" Dec 09 16:09:32 crc kubenswrapper[4716]: I1209 16:09:32.877249 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2" Dec 09 16:09:42 crc kubenswrapper[4716]: E1209 16:09:42.216780 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:09:43 crc kubenswrapper[4716]: E1209 16:09:43.224108 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:09:57 crc kubenswrapper[4716]: E1209 16:09:57.216311 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:09:58 crc kubenswrapper[4716]: E1209 16:09:58.216088 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.013712 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qkj8x"] Dec 09 16:10:05 crc kubenswrapper[4716]: E1209 16:10:05.015039 4716 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b22ffe51-8b47-48a6-9292-0577f680d56b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.015071 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b22ffe51-8b47-48a6-9292-0577f680d56b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.015460 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="b22ffe51-8b47-48a6-9292-0577f680d56b" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.018340 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.025985 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qkj8x"] Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.136737 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-259mz\" (UniqueName: \"kubernetes.io/projected/3e292c28-66a5-4ad2-a600-32b0af547d43-kube-api-access-259mz\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.136991 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.137100 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-utilities\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.240576 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-259mz\" (UniqueName: \"kubernetes.io/projected/3e292c28-66a5-4ad2-a600-32b0af547d43-kube-api-access-259mz\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.240778 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.240841 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-utilities\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.241536 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.241569 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-utilities\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.260502 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-259mz\" (UniqueName: \"kubernetes.io/projected/3e292c28-66a5-4ad2-a600-32b0af547d43-kube-api-access-259mz\") pod \"redhat-operators-qkj8x\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.349379 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:05 crc kubenswrapper[4716]: I1209 16:10:05.859815 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qkj8x"] Dec 09 16:10:06 crc kubenswrapper[4716]: I1209 16:10:06.224934 4716 generic.go:334] "Generic (PLEG): container finished" podID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerID="3c18e1bbc164bde47fa410da47d6d4a22b3163a82f16a6c6c865c2a09853ac33" exitCode=0 Dec 09 16:10:06 crc kubenswrapper[4716]: I1209 16:10:06.225162 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerDied","Data":"3c18e1bbc164bde47fa410da47d6d4a22b3163a82f16a6c6c865c2a09853ac33"} Dec 09 16:10:06 crc kubenswrapper[4716]: I1209 16:10:06.225196 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerStarted","Data":"12fdccc526ba589a14b4d4c3c0200f646e9f3c6a544151f3a278fc21ee514fec"} Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.394125 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.397345 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.409925 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.498348 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-utilities\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.498415 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6cwp\" (UniqueName: \"kubernetes.io/projected/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-kube-api-access-g6cwp\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.498746 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-catalog-content\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.601152 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-utilities\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.602352 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-utilities\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.602474 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6cwp\" (UniqueName: \"kubernetes.io/projected/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-kube-api-access-g6cwp\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.603747 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-catalog-content\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.604045 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-catalog-content\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.623330 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-g6cwp\" (UniqueName: \"kubernetes.io/projected/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-kube-api-access-g6cwp\") pod \"certified-operators-tkb4s\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:07 crc kubenswrapper[4716]: I1209 16:10:07.750409 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:08 crc kubenswrapper[4716]: E1209 16:10:08.217405 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:10:08 crc kubenswrapper[4716]: I1209 16:10:08.255415 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerStarted","Data":"0c73b42b35d96dfbd22ba33a5053ef9a93e237c06098ffcce05295c4d6644a2b"} Dec 09 16:10:08 crc kubenswrapper[4716]: I1209 16:10:08.304251 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:10:09 crc kubenswrapper[4716]: I1209 16:10:09.267507 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerStarted","Data":"8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963"} Dec 09 16:10:09 crc kubenswrapper[4716]: I1209 16:10:09.268914 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerStarted","Data":"7c61a52e4c20737d5984fdc0657d10eac034be0fe3388e168fcc7b2a774caca1"} Dec 09 16:10:10 crc kubenswrapper[4716]: I1209 16:10:10.280258 4716 generic.go:334] "Generic (PLEG): container finished" podID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerID="8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963" exitCode=0 Dec 09 16:10:10 crc kubenswrapper[4716]: I1209 16:10:10.280311 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerDied","Data":"8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963"} Dec 09 16:10:11 crc kubenswrapper[4716]: E1209 16:10:11.222934 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:10:11 crc kubenswrapper[4716]: I1209 16:10:11.299333 4716 generic.go:334] "Generic (PLEG): container finished" podID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerID="0c73b42b35d96dfbd22ba33a5053ef9a93e237c06098ffcce05295c4d6644a2b" exitCode=0 Dec 09 16:10:11 crc kubenswrapper[4716]: I1209 16:10:11.299380 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" 
event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerDied","Data":"0c73b42b35d96dfbd22ba33a5053ef9a93e237c06098ffcce05295c4d6644a2b"} Dec 09 16:10:12 crc kubenswrapper[4716]: I1209 16:10:12.319768 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerStarted","Data":"59b09e84d0fe20ce5d7153b84556d6d6203c1a59ab514b5f5843c0289e01caad"} Dec 09 16:10:12 crc kubenswrapper[4716]: I1209 16:10:12.368185 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qkj8x" podStartSLOduration=2.764224606 podStartE2EDuration="8.368065643s" podCreationTimestamp="2025-12-09 16:10:04 +0000 UTC" firstStartedPulling="2025-12-09 16:10:06.227494507 +0000 UTC m=+3693.382238495" lastFinishedPulling="2025-12-09 16:10:11.831335544 +0000 UTC m=+3698.986079532" observedRunningTime="2025-12-09 16:10:12.355383964 +0000 UTC m=+3699.510127952" watchObservedRunningTime="2025-12-09 16:10:12.368065643 +0000 UTC m=+3699.522809631" Dec 09 16:10:15 crc kubenswrapper[4716]: I1209 16:10:15.351667 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:15 crc kubenswrapper[4716]: I1209 16:10:15.352077 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:16 crc kubenswrapper[4716]: I1209 16:10:16.404658 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qkj8x" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="registry-server" probeResult="failure" output=< Dec 09 16:10:16 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 16:10:16 crc kubenswrapper[4716]: > Dec 09 16:10:17 crc kubenswrapper[4716]: I1209 16:10:17.410970 4716 generic.go:334] "Generic (PLEG): container finished" podID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerID="dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382" exitCode=0 Dec 09 16:10:17 crc kubenswrapper[4716]: I1209 16:10:17.411480 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerDied","Data":"dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382"} Dec 09 16:10:18 crc kubenswrapper[4716]: I1209 16:10:18.435999 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerStarted","Data":"a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f"} Dec 09 16:10:18 crc kubenswrapper[4716]: I1209 16:10:18.460587 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tkb4s" podStartSLOduration=3.9057602559999998 podStartE2EDuration="11.460556887s" podCreationTimestamp="2025-12-09 16:10:07 +0000 UTC" firstStartedPulling="2025-12-09 16:10:10.2826853 +0000 UTC m=+3697.437429288" lastFinishedPulling="2025-12-09 16:10:17.837481931 +0000 UTC m=+3704.992225919" observedRunningTime="2025-12-09 16:10:18.456204754 +0000 UTC m=+3705.610948762" watchObservedRunningTime="2025-12-09 16:10:18.460556887 +0000 UTC m=+3705.615300875" Dec 09 16:10:22 crc kubenswrapper[4716]: E1209 16:10:22.217488 4716 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:10:25 crc kubenswrapper[4716]: E1209 16:10:25.217387 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:10:26 crc kubenswrapper[4716]: I1209 16:10:26.404149 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qkj8x" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="registry-server" probeResult="failure" output=< Dec 09 16:10:26 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 16:10:26 crc kubenswrapper[4716]: > Dec 09 16:10:27 crc kubenswrapper[4716]: I1209 16:10:27.750549 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:27 crc kubenswrapper[4716]: I1209 16:10:27.750923 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:27 crc kubenswrapper[4716]: I1209 16:10:27.803586 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:28 crc kubenswrapper[4716]: I1209 16:10:28.598085 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:10:28 crc kubenswrapper[4716]: I1209 16:10:28.686865 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:10:28 crc kubenswrapper[4716]: I1209 16:10:28.728129 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 16:10:28 crc kubenswrapper[4716]: I1209 16:10:28.728459 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l22s4" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="registry-server" containerID="cri-o://00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390" gracePeriod=2 Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.314459 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.480121 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-catalog-content\") pod \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.480533 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxcnm\" (UniqueName: \"kubernetes.io/projected/ecb1340b-873c-4203-8dcd-4b9fc2264d00-kube-api-access-zxcnm\") pod \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.480761 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-utilities\") pod \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\" (UID: \"ecb1340b-873c-4203-8dcd-4b9fc2264d00\") " Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.482093 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-utilities" (OuterVolumeSpecName: "utilities") pod "ecb1340b-873c-4203-8dcd-4b9fc2264d00" (UID: "ecb1340b-873c-4203-8dcd-4b9fc2264d00"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.499042 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecb1340b-873c-4203-8dcd-4b9fc2264d00-kube-api-access-zxcnm" (OuterVolumeSpecName: "kube-api-access-zxcnm") pod "ecb1340b-873c-4203-8dcd-4b9fc2264d00" (UID: "ecb1340b-873c-4203-8dcd-4b9fc2264d00"). InnerVolumeSpecName "kube-api-access-zxcnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.548548 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ecb1340b-873c-4203-8dcd-4b9fc2264d00" (UID: "ecb1340b-873c-4203-8dcd-4b9fc2264d00"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.560921 4716 generic.go:334] "Generic (PLEG): container finished" podID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerID="00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390" exitCode=0 Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.561004 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l22s4" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.561049 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerDied","Data":"00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390"} Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.561082 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l22s4" event={"ID":"ecb1340b-873c-4203-8dcd-4b9fc2264d00","Type":"ContainerDied","Data":"afbddd0eb274610a910e307cfe44fec183ab95cea824908cb27daf64e8883b29"} Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.561187 4716 scope.go:117] "RemoveContainer" containerID="00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.584019 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxcnm\" (UniqueName: \"kubernetes.io/projected/ecb1340b-873c-4203-8dcd-4b9fc2264d00-kube-api-access-zxcnm\") on node \"crc\" DevicePath \"\"" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.584069 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.584088 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecb1340b-873c-4203-8dcd-4b9fc2264d00-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.593823 4716 scope.go:117] "RemoveContainer" containerID="24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.604523 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.626448 4716 scope.go:117] "RemoveContainer" containerID="d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.644373 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l22s4"] Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.690590 4716 scope.go:117] "RemoveContainer" containerID="00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390" Dec 09 16:10:29 crc kubenswrapper[4716]: E1209 16:10:29.691121 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390\": container with ID starting with 00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390 not found: ID does not exist" containerID="00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.691232 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390"} err="failed to get container status \"00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390\": rpc error: code = NotFound desc = could not find container \"00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390\": container with ID starting 
with 00706f05451c66bc326218bdbe1f9e56efc4abea06b85601f8e414b93682e390 not found: ID does not exist" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.691324 4716 scope.go:117] "RemoveContainer" containerID="24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de" Dec 09 16:10:29 crc kubenswrapper[4716]: E1209 16:10:29.691773 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de\": container with ID starting with 24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de not found: ID does not exist" containerID="24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.691891 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de"} err="failed to get container status \"24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de\": rpc error: code = NotFound desc = could not find container \"24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de\": container with ID starting with 24fe96fe521b0695cad9be77a0cb7a27fa72adc5822f7e0d943a4fc42ddf79de not found: ID does not exist" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.691987 4716 scope.go:117] "RemoveContainer" containerID="d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67" Dec 09 16:10:29 crc kubenswrapper[4716]: E1209 16:10:29.692320 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67\": container with ID starting with d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67 not found: ID does not exist" containerID="d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67" Dec 09 16:10:29 crc kubenswrapper[4716]: I1209 16:10:29.692421 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67"} err="failed to get container status \"d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67\": rpc error: code = NotFound desc = could not find container \"d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67\": container with ID starting with d211491304fbb3b1250c7c1b71fa799fe24475ee100d1ce2f3e8de5c1b54ee67 not found: ID does not exist" Dec 09 16:10:31 crc kubenswrapper[4716]: I1209 16:10:31.227126 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" path="/var/lib/kubelet/pods/ecb1340b-873c-4203-8dcd-4b9fc2264d00/volumes" Dec 09 16:10:35 crc kubenswrapper[4716]: E1209 16:10:35.216193 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:10:35 crc kubenswrapper[4716]: I1209 16:10:35.415338 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:35 crc kubenswrapper[4716]: I1209 16:10:35.465805 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:36 crc kubenswrapper[4716]: I1209 16:10:36.209982 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qkj8x"] Dec 09 16:10:36 crc kubenswrapper[4716]: I1209 16:10:36.637884 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qkj8x" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="registry-server" containerID="cri-o://59b09e84d0fe20ce5d7153b84556d6d6203c1a59ab514b5f5843c0289e01caad" gracePeriod=2 Dec 09 16:10:37 crc kubenswrapper[4716]: E1209 16:10:37.215684 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.652655 4716 generic.go:334] "Generic (PLEG): container finished" podID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerID="59b09e84d0fe20ce5d7153b84556d6d6203c1a59ab514b5f5843c0289e01caad" exitCode=0 Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.652698 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerDied","Data":"59b09e84d0fe20ce5d7153b84556d6d6203c1a59ab514b5f5843c0289e01caad"} Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.652726 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qkj8x" event={"ID":"3e292c28-66a5-4ad2-a600-32b0af547d43","Type":"ContainerDied","Data":"12fdccc526ba589a14b4d4c3c0200f646e9f3c6a544151f3a278fc21ee514fec"} Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.652736 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12fdccc526ba589a14b4d4c3c0200f646e9f3c6a544151f3a278fc21ee514fec" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.659489 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.793588 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-259mz\" (UniqueName: \"kubernetes.io/projected/3e292c28-66a5-4ad2-a600-32b0af547d43-kube-api-access-259mz\") pod \"3e292c28-66a5-4ad2-a600-32b0af547d43\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.793841 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-utilities\") pod \"3e292c28-66a5-4ad2-a600-32b0af547d43\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.793953 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content\") pod \"3e292c28-66a5-4ad2-a600-32b0af547d43\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.794824 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-utilities" (OuterVolumeSpecName: "utilities") pod "3e292c28-66a5-4ad2-a600-32b0af547d43" (UID: "3e292c28-66a5-4ad2-a600-32b0af547d43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.800712 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e292c28-66a5-4ad2-a600-32b0af547d43-kube-api-access-259mz" (OuterVolumeSpecName: "kube-api-access-259mz") pod "3e292c28-66a5-4ad2-a600-32b0af547d43" (UID: "3e292c28-66a5-4ad2-a600-32b0af547d43"). InnerVolumeSpecName "kube-api-access-259mz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.802279 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-259mz\" (UniqueName: \"kubernetes.io/projected/3e292c28-66a5-4ad2-a600-32b0af547d43-kube-api-access-259mz\") on node \"crc\" DevicePath \"\"" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.802310 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.902406 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e292c28-66a5-4ad2-a600-32b0af547d43" (UID: "3e292c28-66a5-4ad2-a600-32b0af547d43"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.903888 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content\") pod \"3e292c28-66a5-4ad2-a600-32b0af547d43\" (UID: \"3e292c28-66a5-4ad2-a600-32b0af547d43\") " Dec 09 16:10:37 crc kubenswrapper[4716]: W1209 16:10:37.904212 4716 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/3e292c28-66a5-4ad2-a600-32b0af547d43/volumes/kubernetes.io~empty-dir/catalog-content Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.904250 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e292c28-66a5-4ad2-a600-32b0af547d43" (UID: "3e292c28-66a5-4ad2-a600-32b0af547d43"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:10:37 crc kubenswrapper[4716]: I1209 16:10:37.904661 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e292c28-66a5-4ad2-a600-32b0af547d43-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:10:38 crc kubenswrapper[4716]: I1209 16:10:38.664850 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qkj8x" Dec 09 16:10:38 crc kubenswrapper[4716]: I1209 16:10:38.709887 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qkj8x"] Dec 09 16:10:38 crc kubenswrapper[4716]: I1209 16:10:38.725169 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qkj8x"] Dec 09 16:10:39 crc kubenswrapper[4716]: I1209 16:10:39.226252 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" path="/var/lib/kubelet/pods/3e292c28-66a5-4ad2-a600-32b0af547d43/volumes" Dec 09 16:10:48 crc kubenswrapper[4716]: E1209 16:10:48.217120 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.061240 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn"] Dec 09 16:10:49 crc kubenswrapper[4716]: E1209 16:10:49.062265 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="registry-server" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062297 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="registry-server" Dec 09 16:10:49 crc kubenswrapper[4716]: E1209 16:10:49.062340 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="extract-content" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062348 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="extract-content" Dec 09 16:10:49 
crc kubenswrapper[4716]: E1209 16:10:49.062376 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="extract-utilities" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062385 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="extract-utilities" Dec 09 16:10:49 crc kubenswrapper[4716]: E1209 16:10:49.062400 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="registry-server" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062408 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="registry-server" Dec 09 16:10:49 crc kubenswrapper[4716]: E1209 16:10:49.062445 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="extract-utilities" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062453 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="extract-utilities" Dec 09 16:10:49 crc kubenswrapper[4716]: E1209 16:10:49.062469 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="extract-content" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062478 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="extract-content" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062830 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e292c28-66a5-4ad2-a600-32b0af547d43" containerName="registry-server" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.062869 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecb1340b-873c-4203-8dcd-4b9fc2264d00" containerName="registry-server" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.064124 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.071022 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.073369 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.073439 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.083127 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn"] Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.083299 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.113357 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7xx2\" (UniqueName: \"kubernetes.io/projected/3f192ee3-f23a-4d2f-b423-fee621bf273e-kube-api-access-r7xx2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.113462 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.113518 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.215559 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7xx2\" (UniqueName: \"kubernetes.io/projected/3f192ee3-f23a-4d2f-b423-fee621bf273e-kube-api-access-r7xx2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.215668 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.215714 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.222681 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.240307 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.240528 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7xx2\" (UniqueName: \"kubernetes.io/projected/3f192ee3-f23a-4d2f-b423-fee621bf273e-kube-api-access-r7xx2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-85wnn\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:49 crc kubenswrapper[4716]: I1209 16:10:49.398892 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" Dec 09 16:10:50 crc kubenswrapper[4716]: E1209 16:10:50.220990 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:10:50 crc kubenswrapper[4716]: W1209 16:10:50.562310 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f192ee3_f23a_4d2f_b423_fee621bf273e.slice/crio-e7c80c64bc8d96eed34a0986230e682f49300616bda0434160522fee53c17eb8 WatchSource:0}: Error finding container e7c80c64bc8d96eed34a0986230e682f49300616bda0434160522fee53c17eb8: Status 404 returned error can't find the container with id e7c80c64bc8d96eed34a0986230e682f49300616bda0434160522fee53c17eb8 Dec 09 16:10:50 crc kubenswrapper[4716]: I1209 16:10:50.577568 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn"] Dec 09 16:10:50 crc kubenswrapper[4716]: I1209 16:10:50.808614 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" event={"ID":"3f192ee3-f23a-4d2f-b423-fee621bf273e","Type":"ContainerStarted","Data":"e7c80c64bc8d96eed34a0986230e682f49300616bda0434160522fee53c17eb8"} Dec 09 16:10:51 crc kubenswrapper[4716]: I1209 16:10:51.819333 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" event={"ID":"3f192ee3-f23a-4d2f-b423-fee621bf273e","Type":"ContainerStarted","Data":"2343810a474a27e8bbabc3a21ef737002fadff4c0e20002d2bcc7ca35c814ad2"} Dec 09 16:10:51 crc 
kubenswrapper[4716]: I1209 16:10:51.842521 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" podStartSLOduration=2.19196051 podStartE2EDuration="2.842492844s" podCreationTimestamp="2025-12-09 16:10:49 +0000 UTC" firstStartedPulling="2025-12-09 16:10:50.564347225 +0000 UTC m=+3737.719091213" lastFinishedPulling="2025-12-09 16:10:51.214879559 +0000 UTC m=+3738.369623547" observedRunningTime="2025-12-09 16:10:51.831883844 +0000 UTC m=+3738.986627852" watchObservedRunningTime="2025-12-09 16:10:51.842492844 +0000 UTC m=+3738.997236832" Dec 09 16:11:01 crc kubenswrapper[4716]: E1209 16:11:01.217323 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:11:04 crc kubenswrapper[4716]: E1209 16:11:04.220741 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:11:15 crc kubenswrapper[4716]: E1209 16:11:15.217126 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:11:17 crc kubenswrapper[4716]: E1209 16:11:17.216414 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:11:27 crc kubenswrapper[4716]: E1209 16:11:27.215907 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:11:31 crc kubenswrapper[4716]: E1209 16:11:31.217216 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:11:40 crc kubenswrapper[4716]: E1209 16:11:40.216233 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:11:42 crc kubenswrapper[4716]: E1209 16:11:42.215269 4716 pod_workers.go:1301] "Error syncing pod, 
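Note: the two durations in the pod_startup_latency_tracker entry above are self-consistent. The logged values agree with podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp, and podStartSLOduration = that span minus the image-pull window (firstStartedPulling to lastFinishedPulling). A minimal Python check (timestamps truncated from nanoseconds to microseconds, which is all datetime carries):

    from datetime import datetime

    fmt = "%Y-%m-%d %H:%M:%S.%f"
    created   = datetime.strptime("2025-12-09 16:10:49.000000", fmt)
    pull_from = datetime.strptime("2025-12-09 16:10:50.564347", fmt)
    pull_to   = datetime.strptime("2025-12-09 16:10:51.214879", fmt)
    observed  = datetime.strptime("2025-12-09 16:10:51.842492", fmt)

    e2e = (observed - created).total_seconds()          # ~2.842492s -> podStartE2EDuration
    slo = e2e - (pull_to - pull_from).total_seconds()   # ~2.191960s -> podStartSLOduration
    print(f"e2e={e2e:.6f}s slo={slo:.6f}s")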
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:11:47 crc kubenswrapper[4716]: I1209 16:11:47.922303 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:11:47 crc kubenswrapper[4716]: I1209 16:11:47.922765 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:11:55 crc kubenswrapper[4716]: E1209 16:11:55.215683 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:11:55 crc kubenswrapper[4716]: E1209 16:11:55.217364 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:12:08 crc kubenswrapper[4716]: E1209 16:12:08.216274 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:12:08 crc kubenswrapper[4716]: E1209 16:12:08.217172 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:12:17 crc kubenswrapper[4716]: I1209 16:12:17.922356 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:12:17 crc kubenswrapper[4716]: I1209 16:12:17.923143 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:12:20 crc kubenswrapper[4716]: E1209 16:12:20.217430 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:12:21 crc kubenswrapper[4716]: E1209 16:12:21.216075 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:12:33 crc kubenswrapper[4716]: E1209 16:12:33.223458 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:12:35 crc kubenswrapper[4716]: E1209 16:12:35.216403 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:12:45 crc kubenswrapper[4716]: E1209 16:12:45.218873 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:12:47 crc kubenswrapper[4716]: I1209 16:12:47.922283 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:12:47 crc kubenswrapper[4716]: I1209 16:12:47.923118 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:12:47 crc kubenswrapper[4716]: I1209 16:12:47.923180 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:12:47 crc kubenswrapper[4716]: I1209 16:12:47.924206 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:12:47 crc kubenswrapper[4716]: I1209 16:12:47.924261 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" 
containerID="cri-o://490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" gracePeriod=600 Dec 09 16:12:48 crc kubenswrapper[4716]: E1209 16:12:48.056399 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:12:48 crc kubenswrapper[4716]: I1209 16:12:48.082830 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" exitCode=0 Dec 09 16:12:48 crc kubenswrapper[4716]: I1209 16:12:48.082885 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"} Dec 09 16:12:48 crc kubenswrapper[4716]: I1209 16:12:48.082933 4716 scope.go:117] "RemoveContainer" containerID="a72cb3bf39c22c5582c4f970310212339f80ef4517855fa043155dba581a91f1" Dec 09 16:12:48 crc kubenswrapper[4716]: I1209 16:12:48.083933 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:12:48 crc kubenswrapper[4716]: E1209 16:12:48.084366 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:12:50 crc kubenswrapper[4716]: E1209 16:12:50.217044 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:12:58 crc kubenswrapper[4716]: E1209 16:12:58.217043 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:13:00 crc kubenswrapper[4716]: I1209 16:13:00.213840 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:13:00 crc kubenswrapper[4716]: E1209 16:13:00.214561 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:13:05 crc 
Dec 09 16:13:05 crc kubenswrapper[4716]: E1209 16:13:05.216686 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:13:12 crc kubenswrapper[4716]: E1209 16:13:12.216356 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:13:13 crc kubenswrapper[4716]: I1209 16:13:13.224085 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"
Dec 09 16:13:13 crc kubenswrapper[4716]: E1209 16:13:13.224411 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:13:19 crc kubenswrapper[4716]: E1209 16:13:19.216437 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:13:24 crc kubenswrapper[4716]: I1209 16:13:24.214115 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"
Dec 09 16:13:24 crc kubenswrapper[4716]: E1209 16:13:24.215289 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:13:25 crc kubenswrapper[4716]: E1209 16:13:25.217002 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:13:32 crc kubenswrapper[4716]: E1209 16:13:32.402948 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:13:37 crc kubenswrapper[4716]: I1209 16:13:37.216518 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
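Note: the recurring "back-off 5m0s" in the machine-config-daemon entries is the crash-loop restart back-off at its cap; per the Kubernetes docs the delay starts small and doubles after each failed restart up to five minutes, which is why the kubelet only re-attempts (and re-logs) every few minutes here. An illustrative sketch of that schedule (values from the docs, not read from this log):

    # kubelet-style crash-loop back-off: base delay, doubling, five-minute cap
    BASE, FACTOR, CAP = 10.0, 2.0, 300.0

    def backoff_delays(restarts):
        delay = BASE
        for _ in range(restarts):
            yield min(delay, CAP)
            delay *= FACTOR

    print([f"{d:.0f}s" for d in backoff_delays(7)])
    # ['10s', '20s', '40s', '80s', '160s', '300s', '300s']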
Dec 09 16:13:37 crc kubenswrapper[4716]: E1209 16:13:37.566819 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:13:37 crc kubenswrapper[4716]: E1209 16:13:37.567147 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:13:37 crc kubenswrapper[4716]: E1209 16:13:37.567407 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:13:37 crc kubenswrapper[4716]: E1209 16:13:37.569704 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:13:38 crc kubenswrapper[4716]: I1209 16:13:38.213414 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"
Dec 09 16:13:38 crc kubenswrapper[4716]: E1209 16:13:38.213775 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:13:46 crc kubenswrapper[4716]: E1209 16:13:46.415435 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:13:46 crc kubenswrapper[4716]: E1209 16:13:46.416416 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:13:46 crc kubenswrapper[4716]: E1209 16:13:46.417902 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:13:49 crc kubenswrapper[4716]: I1209 16:13:49.214166 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:13:49 crc kubenswrapper[4716]: E1209 16:13:49.215130 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:13:52 crc kubenswrapper[4716]: E1209 16:13:52.354500 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:13:57 crc kubenswrapper[4716]: E1209 16:13:57.216160 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:14:02 crc kubenswrapper[4716]: I1209 16:14:02.214238 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:14:02 crc kubenswrapper[4716]: E1209 16:14:02.216487 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:14:03 crc kubenswrapper[4716]: E1209 16:14:03.224712 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:14:10 crc kubenswrapper[4716]: E1209 16:14:10.218441 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:14:16 crc kubenswrapper[4716]: I1209 16:14:16.214168 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:14:16 crc kubenswrapper[4716]: E1209 16:14:16.215081 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:14:16 crc kubenswrapper[4716]: E1209 16:14:16.216351 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:14:23 crc kubenswrapper[4716]: E1209 16:14:23.251971 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:14:28 crc kubenswrapper[4716]: E1209 16:14:28.216614 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:14:30 crc kubenswrapper[4716]: I1209 16:14:30.214659 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:14:30 crc kubenswrapper[4716]: E1209 16:14:30.215367 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:14:36 crc kubenswrapper[4716]: E1209 16:14:36.218020 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:14:41 crc kubenswrapper[4716]: I1209 16:14:41.213489 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:14:41 crc kubenswrapper[4716]: E1209 16:14:41.214701 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:14:41 crc kubenswrapper[4716]: E1209 16:14:41.215940 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" 
podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:14:49 crc kubenswrapper[4716]: E1209 16:14:49.216564 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:14:52 crc kubenswrapper[4716]: I1209 16:14:52.214612 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:14:52 crc kubenswrapper[4716]: E1209 16:14:52.215503 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:14:52 crc kubenswrapper[4716]: E1209 16:14:52.216636 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.170141 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z"] Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.173061 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.176132 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.177317 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.183099 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z"] Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.199720 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1849e11d-7a37-4dd5-8c18-f7306be11319-secret-volume\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.199819 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzzb8\" (UniqueName: \"kubernetes.io/projected/1849e11d-7a37-4dd5-8c18-f7306be11319-kube-api-access-rzzb8\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.200164 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1849e11d-7a37-4dd5-8c18-f7306be11319-config-volume\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.302546 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1849e11d-7a37-4dd5-8c18-f7306be11319-config-volume\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.302879 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1849e11d-7a37-4dd5-8c18-f7306be11319-secret-volume\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.303555 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1849e11d-7a37-4dd5-8c18-f7306be11319-config-volume\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.304007 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzzb8\" (UniqueName: \"kubernetes.io/projected/1849e11d-7a37-4dd5-8c18-f7306be11319-kube-api-access-rzzb8\") pod 
\"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.317363 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1849e11d-7a37-4dd5-8c18-f7306be11319-secret-volume\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.320525 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzzb8\" (UniqueName: \"kubernetes.io/projected/1849e11d-7a37-4dd5-8c18-f7306be11319-kube-api-access-rzzb8\") pod \"collect-profiles-29421615-rjf9z\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:00 crc kubenswrapper[4716]: I1209 16:15:00.516600 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:01 crc kubenswrapper[4716]: W1209 16:15:01.033399 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1849e11d_7a37_4dd5_8c18_f7306be11319.slice/crio-0c914b25bea32c960f601fd72e382c1bedeb7ea2723331e960e5ec6512ab8a65 WatchSource:0}: Error finding container 0c914b25bea32c960f601fd72e382c1bedeb7ea2723331e960e5ec6512ab8a65: Status 404 returned error can't find the container with id 0c914b25bea32c960f601fd72e382c1bedeb7ea2723331e960e5ec6512ab8a65 Dec 09 16:15:01 crc kubenswrapper[4716]: I1209 16:15:01.047616 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z"] Dec 09 16:15:01 crc kubenswrapper[4716]: I1209 16:15:01.181247 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" event={"ID":"1849e11d-7a37-4dd5-8c18-f7306be11319","Type":"ContainerStarted","Data":"0c914b25bea32c960f601fd72e382c1bedeb7ea2723331e960e5ec6512ab8a65"} Dec 09 16:15:02 crc kubenswrapper[4716]: I1209 16:15:02.194297 4716 generic.go:334] "Generic (PLEG): container finished" podID="1849e11d-7a37-4dd5-8c18-f7306be11319" containerID="ea79bceb5b61c624095dce529c3612fd1d710b80384b27b11f797df0a6fa25e9" exitCode=0 Dec 09 16:15:02 crc kubenswrapper[4716]: I1209 16:15:02.194394 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" event={"ID":"1849e11d-7a37-4dd5-8c18-f7306be11319","Type":"ContainerDied","Data":"ea79bceb5b61c624095dce529c3612fd1d710b80384b27b11f797df0a6fa25e9"} Dec 09 16:15:04 crc kubenswrapper[4716]: E1209 16:15:04.215988 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.217330 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" 
event={"ID":"1849e11d-7a37-4dd5-8c18-f7306be11319","Type":"ContainerDied","Data":"0c914b25bea32c960f601fd72e382c1bedeb7ea2723331e960e5ec6512ab8a65"} Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.217389 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c914b25bea32c960f601fd72e382c1bedeb7ea2723331e960e5ec6512ab8a65" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.571530 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.627167 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzzb8\" (UniqueName: \"kubernetes.io/projected/1849e11d-7a37-4dd5-8c18-f7306be11319-kube-api-access-rzzb8\") pod \"1849e11d-7a37-4dd5-8c18-f7306be11319\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.627241 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1849e11d-7a37-4dd5-8c18-f7306be11319-secret-volume\") pod \"1849e11d-7a37-4dd5-8c18-f7306be11319\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.627363 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1849e11d-7a37-4dd5-8c18-f7306be11319-config-volume\") pod \"1849e11d-7a37-4dd5-8c18-f7306be11319\" (UID: \"1849e11d-7a37-4dd5-8c18-f7306be11319\") " Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.628374 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1849e11d-7a37-4dd5-8c18-f7306be11319-config-volume" (OuterVolumeSpecName: "config-volume") pod "1849e11d-7a37-4dd5-8c18-f7306be11319" (UID: "1849e11d-7a37-4dd5-8c18-f7306be11319"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.634971 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1849e11d-7a37-4dd5-8c18-f7306be11319-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1849e11d-7a37-4dd5-8c18-f7306be11319" (UID: "1849e11d-7a37-4dd5-8c18-f7306be11319"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.639000 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1849e11d-7a37-4dd5-8c18-f7306be11319-kube-api-access-rzzb8" (OuterVolumeSpecName: "kube-api-access-rzzb8") pod "1849e11d-7a37-4dd5-8c18-f7306be11319" (UID: "1849e11d-7a37-4dd5-8c18-f7306be11319"). InnerVolumeSpecName "kube-api-access-rzzb8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.730841 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzzb8\" (UniqueName: \"kubernetes.io/projected/1849e11d-7a37-4dd5-8c18-f7306be11319-kube-api-access-rzzb8\") on node \"crc\" DevicePath \"\"" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.730878 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1849e11d-7a37-4dd5-8c18-f7306be11319-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:15:04 crc kubenswrapper[4716]: I1209 16:15:04.730894 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1849e11d-7a37-4dd5-8c18-f7306be11319-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:15:05 crc kubenswrapper[4716]: E1209 16:15:05.215781 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:15:05 crc kubenswrapper[4716]: I1209 16:15:05.229682 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421615-rjf9z" Dec 09 16:15:05 crc kubenswrapper[4716]: I1209 16:15:05.651889 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt"] Dec 09 16:15:05 crc kubenswrapper[4716]: I1209 16:15:05.666657 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421570-smgjt"] Dec 09 16:15:07 crc kubenswrapper[4716]: I1209 16:15:07.214426 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:15:07 crc kubenswrapper[4716]: E1209 16:15:07.215150 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:15:07 crc kubenswrapper[4716]: I1209 16:15:07.228547 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb80e47-ed2e-48c3-8332-346daa870065" path="/var/lib/kubelet/pods/3cb80e47-ed2e-48c3-8332-346daa870065/volumes" Dec 09 16:15:15 crc kubenswrapper[4716]: I1209 16:15:15.090501 4716 scope.go:117] "RemoveContainer" containerID="554a902df2fc58e489861334df7a421200b30106185ab26bf825e11708e3875f" Dec 09 16:15:17 crc kubenswrapper[4716]: E1209 16:15:17.216643 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:15:17 crc kubenswrapper[4716]: E1209 16:15:17.216651 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" 
with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:15:21 crc kubenswrapper[4716]: I1209 16:15:21.215038 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:15:21 crc kubenswrapper[4716]: E1209 16:15:21.216091 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.621259 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rw49v"] Dec 09 16:15:25 crc kubenswrapper[4716]: E1209 16:15:25.622446 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1849e11d-7a37-4dd5-8c18-f7306be11319" containerName="collect-profiles" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.622468 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="1849e11d-7a37-4dd5-8c18-f7306be11319" containerName="collect-profiles" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.622786 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="1849e11d-7a37-4dd5-8c18-f7306be11319" containerName="collect-profiles" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.624674 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.633098 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rw49v"] Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.682751 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-catalog-content\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.682811 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-utilities\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.683054 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sp8h\" (UniqueName: \"kubernetes.io/projected/270770c1-9388-4113-82e3-abe1cf68183b-kube-api-access-2sp8h\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.786091 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sp8h\" (UniqueName: \"kubernetes.io/projected/270770c1-9388-4113-82e3-abe1cf68183b-kube-api-access-2sp8h\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.786218 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-catalog-content\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.786293 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-utilities\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.786926 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-utilities\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.786945 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-catalog-content\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.808071 4716 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2sp8h\" (UniqueName: \"kubernetes.io/projected/270770c1-9388-4113-82e3-abe1cf68183b-kube-api-access-2sp8h\") pod \"redhat-marketplace-rw49v\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:25 crc kubenswrapper[4716]: I1209 16:15:25.945197 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:26 crc kubenswrapper[4716]: I1209 16:15:26.970087 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rw49v"] Dec 09 16:15:27 crc kubenswrapper[4716]: I1209 16:15:27.471478 4716 generic.go:334] "Generic (PLEG): container finished" podID="270770c1-9388-4113-82e3-abe1cf68183b" containerID="7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27" exitCode=0 Dec 09 16:15:27 crc kubenswrapper[4716]: I1209 16:15:27.471541 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerDied","Data":"7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27"} Dec 09 16:15:27 crc kubenswrapper[4716]: I1209 16:15:27.472168 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerStarted","Data":"857c7808d8fa72102d684c624729bd9768789e28bbe6bb435ed6473326cd81d7"} Dec 09 16:15:29 crc kubenswrapper[4716]: E1209 16:15:29.217530 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:15:29 crc kubenswrapper[4716]: I1209 16:15:29.508338 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerStarted","Data":"ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71"} Dec 09 16:15:30 crc kubenswrapper[4716]: I1209 16:15:30.521932 4716 generic.go:334] "Generic (PLEG): container finished" podID="270770c1-9388-4113-82e3-abe1cf68183b" containerID="ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71" exitCode=0 Dec 09 16:15:30 crc kubenswrapper[4716]: I1209 16:15:30.522009 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerDied","Data":"ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71"} Dec 09 16:15:31 crc kubenswrapper[4716]: I1209 16:15:31.536930 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerStarted","Data":"e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2"} Dec 09 16:15:31 crc kubenswrapper[4716]: I1209 16:15:31.559754 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rw49v" podStartSLOduration=3.015565808 podStartE2EDuration="6.559717596s" podCreationTimestamp="2025-12-09 16:15:25 +0000 UTC" firstStartedPulling="2025-12-09 16:15:27.473482207 +0000 UTC m=+4014.628226195" 
lastFinishedPulling="2025-12-09 16:15:31.017633995 +0000 UTC m=+4018.172377983" observedRunningTime="2025-12-09 16:15:31.556404182 +0000 UTC m=+4018.711148180" watchObservedRunningTime="2025-12-09 16:15:31.559717596 +0000 UTC m=+4018.714461584" Dec 09 16:15:32 crc kubenswrapper[4716]: I1209 16:15:32.214064 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:15:32 crc kubenswrapper[4716]: E1209 16:15:32.214776 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:15:32 crc kubenswrapper[4716]: E1209 16:15:32.215358 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:15:35 crc kubenswrapper[4716]: I1209 16:15:35.946335 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:35 crc kubenswrapper[4716]: I1209 16:15:35.947087 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:36 crc kubenswrapper[4716]: I1209 16:15:36.002439 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:36 crc kubenswrapper[4716]: I1209 16:15:36.641352 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:36 crc kubenswrapper[4716]: I1209 16:15:36.694881 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rw49v"] Dec 09 16:15:38 crc kubenswrapper[4716]: I1209 16:15:38.614149 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rw49v" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="registry-server" containerID="cri-o://e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2" gracePeriod=2 Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.178396 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.246866 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sp8h\" (UniqueName: \"kubernetes.io/projected/270770c1-9388-4113-82e3-abe1cf68183b-kube-api-access-2sp8h\") pod \"270770c1-9388-4113-82e3-abe1cf68183b\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.247254 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-catalog-content\") pod \"270770c1-9388-4113-82e3-abe1cf68183b\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.247326 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-utilities\") pod \"270770c1-9388-4113-82e3-abe1cf68183b\" (UID: \"270770c1-9388-4113-82e3-abe1cf68183b\") " Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.249125 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-utilities" (OuterVolumeSpecName: "utilities") pod "270770c1-9388-4113-82e3-abe1cf68183b" (UID: "270770c1-9388-4113-82e3-abe1cf68183b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.255418 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/270770c1-9388-4113-82e3-abe1cf68183b-kube-api-access-2sp8h" (OuterVolumeSpecName: "kube-api-access-2sp8h") pod "270770c1-9388-4113-82e3-abe1cf68183b" (UID: "270770c1-9388-4113-82e3-abe1cf68183b"). InnerVolumeSpecName "kube-api-access-2sp8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.270294 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "270770c1-9388-4113-82e3-abe1cf68183b" (UID: "270770c1-9388-4113-82e3-abe1cf68183b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.350297 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sp8h\" (UniqueName: \"kubernetes.io/projected/270770c1-9388-4113-82e3-abe1cf68183b-kube-api-access-2sp8h\") on node \"crc\" DevicePath \"\"" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.350327 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.350338 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/270770c1-9388-4113-82e3-abe1cf68183b-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.625249 4716 generic.go:334] "Generic (PLEG): container finished" podID="270770c1-9388-4113-82e3-abe1cf68183b" containerID="e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2" exitCode=0 Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.625294 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerDied","Data":"e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2"} Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.625323 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rw49v" event={"ID":"270770c1-9388-4113-82e3-abe1cf68183b","Type":"ContainerDied","Data":"857c7808d8fa72102d684c624729bd9768789e28bbe6bb435ed6473326cd81d7"} Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.625341 4716 scope.go:117] "RemoveContainer" containerID="e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.625471 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rw49v" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.663710 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rw49v"] Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.665716 4716 scope.go:117] "RemoveContainer" containerID="ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.676748 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rw49v"] Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.692026 4716 scope.go:117] "RemoveContainer" containerID="7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.746548 4716 scope.go:117] "RemoveContainer" containerID="e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2" Dec 09 16:15:39 crc kubenswrapper[4716]: E1209 16:15:39.747179 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2\": container with ID starting with e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2 not found: ID does not exist" containerID="e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.747210 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2"} err="failed to get container status \"e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2\": rpc error: code = NotFound desc = could not find container \"e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2\": container with ID starting with e72bf7e36c166158889f9f481f00bd63cd6f38eabf4c46f6ccd7371f2c283fc2 not found: ID does not exist" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.747231 4716 scope.go:117] "RemoveContainer" containerID="ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71" Dec 09 16:15:39 crc kubenswrapper[4716]: E1209 16:15:39.747727 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71\": container with ID starting with ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71 not found: ID does not exist" containerID="ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.747744 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71"} err="failed to get container status \"ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71\": rpc error: code = NotFound desc = could not find container \"ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71\": container with ID starting with ed543b19b9891a74bf65b9d3bdae8dff4405a28fce5cedade934d91e05cb5b71 not found: ID does not exist" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.747756 4716 scope.go:117] "RemoveContainer" containerID="7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27" Dec 09 16:15:39 crc kubenswrapper[4716]: E1209 16:15:39.748330 4716 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27\": container with ID starting with 7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27 not found: ID does not exist" containerID="7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27" Dec 09 16:15:39 crc kubenswrapper[4716]: I1209 16:15:39.748351 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27"} err="failed to get container status \"7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27\": rpc error: code = NotFound desc = could not find container \"7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27\": container with ID starting with 7942cd0bd941913cd79041a08b25fccfa3daa29c47717126cf86502c4cabbb27 not found: ID does not exist" Dec 09 16:15:41 crc kubenswrapper[4716]: E1209 16:15:41.216898 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:15:41 crc kubenswrapper[4716]: I1209 16:15:41.227154 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="270770c1-9388-4113-82e3-abe1cf68183b" path="/var/lib/kubelet/pods/270770c1-9388-4113-82e3-abe1cf68183b/volumes" Dec 09 16:15:45 crc kubenswrapper[4716]: I1209 16:15:45.214049 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:15:45 crc kubenswrapper[4716]: E1209 16:15:45.216711 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:15:45 crc kubenswrapper[4716]: E1209 16:15:45.216806 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:15:53 crc kubenswrapper[4716]: E1209 16:15:53.227011 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:15:58 crc kubenswrapper[4716]: I1209 16:15:58.214017 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:15:58 crc kubenswrapper[4716]: E1209 16:15:58.214909 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:15:58 crc kubenswrapper[4716]: E1209 16:15:58.215469 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:16:04 crc kubenswrapper[4716]: E1209 16:16:04.217392 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:16:10 crc kubenswrapper[4716]: E1209 16:16:10.219362 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:16:13 crc kubenswrapper[4716]: I1209 16:16:13.221698 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:16:13 crc kubenswrapper[4716]: E1209 16:16:13.222475 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:16:15 crc kubenswrapper[4716]: I1209 16:16:15.382192 4716 scope.go:117] "RemoveContainer" containerID="3c18e1bbc164bde47fa410da47d6d4a22b3163a82f16a6c6c865c2a09853ac33" Dec 09 16:16:15 crc kubenswrapper[4716]: I1209 16:16:15.406652 4716 scope.go:117] "RemoveContainer" containerID="59b09e84d0fe20ce5d7153b84556d6d6203c1a59ab514b5f5843c0289e01caad" Dec 09 16:16:15 crc kubenswrapper[4716]: I1209 16:16:15.455903 4716 scope.go:117] "RemoveContainer" containerID="0c73b42b35d96dfbd22ba33a5053ef9a93e237c06098ffcce05295c4d6644a2b" Dec 09 16:16:19 crc kubenswrapper[4716]: E1209 16:16:19.216584 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:16:22 crc kubenswrapper[4716]: E1209 16:16:22.216217 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:16:26 crc kubenswrapper[4716]: I1209 16:16:26.213780 4716 scope.go:117] "RemoveContainer" 
containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:16:26 crc kubenswrapper[4716]: E1209 16:16:26.214687 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:16:33 crc kubenswrapper[4716]: E1209 16:16:33.225027 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:16:34 crc kubenswrapper[4716]: E1209 16:16:34.215290 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:16:41 crc kubenswrapper[4716]: I1209 16:16:41.214378 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:16:41 crc kubenswrapper[4716]: E1209 16:16:41.215499 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:16:47 crc kubenswrapper[4716]: E1209 16:16:47.217318 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:16:49 crc kubenswrapper[4716]: E1209 16:16:49.217732 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:16:52 crc kubenswrapper[4716]: I1209 16:16:52.214313 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:16:52 crc kubenswrapper[4716]: E1209 16:16:52.216150 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:16:54 crc 
Dec 09 16:16:54 crc kubenswrapper[4716]: E1209 16:16:54.723996 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="extract-utilities"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.724012 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="extract-utilities"
Dec 09 16:16:54 crc kubenswrapper[4716]: E1209 16:16:54.724063 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="registry-server"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.724071 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="registry-server"
Dec 09 16:16:54 crc kubenswrapper[4716]: E1209 16:16:54.724087 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="extract-content"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.724093 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="extract-content"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.724391 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="270770c1-9388-4113-82e3-abe1cf68183b" containerName="registry-server"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.726528 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.738926 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pbbbv"]
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.916475 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-utilities\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.916526 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-catalog-content\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:54 crc kubenswrapper[4716]: I1209 16:16:54.916583 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfkvp\" (UniqueName: \"kubernetes.io/projected/e6d4422c-a376-46ec-84a7-12fd8b6e2372-kube-api-access-pfkvp\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.019333 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-utilities\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.019605 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-catalog-content\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.019682 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfkvp\" (UniqueName: \"kubernetes.io/projected/e6d4422c-a376-46ec-84a7-12fd8b6e2372-kube-api-access-pfkvp\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.020183 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-utilities\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.020255 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-catalog-content\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.044551 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfkvp\" (UniqueName: \"kubernetes.io/projected/e6d4422c-a376-46ec-84a7-12fd8b6e2372-kube-api-access-pfkvp\") pod \"community-operators-pbbbv\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.058138 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:16:55 crc kubenswrapper[4716]: I1209 16:16:55.601559 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pbbbv"]
Dec 09 16:16:56 crc kubenswrapper[4716]: I1209 16:16:56.505060 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerStarted","Data":"7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48"}
Dec 09 16:16:56 crc kubenswrapper[4716]: I1209 16:16:56.505616 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerStarted","Data":"6183cdffbce462197475de09a275a8e9a1b20fad34711d23765104add085dc11"}
Dec 09 16:16:57 crc kubenswrapper[4716]: I1209 16:16:57.519274 4716 generic.go:334] "Generic (PLEG): container finished" podID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerID="7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48" exitCode=0
Dec 09 16:16:57 crc kubenswrapper[4716]: I1209 16:16:57.519355 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerDied","Data":"7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48"}
Dec 09 16:16:58 crc kubenswrapper[4716]: E1209 16:16:58.217047 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:17:00 crc kubenswrapper[4716]: E1209 16:17:00.215976 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:17:00 crc kubenswrapper[4716]: I1209 16:17:00.564970 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerStarted","Data":"5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f"}
Dec 09 16:17:01 crc kubenswrapper[4716]: I1209 16:17:01.578418 4716 generic.go:334] "Generic (PLEG): container finished" podID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerID="5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f" exitCode=0
Dec 09 16:17:01 crc kubenswrapper[4716]: I1209 16:17:01.578491 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerDied","Data":"5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f"}
Dec 09 16:17:02 crc kubenswrapper[4716]: I1209 16:17:02.591956 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerStarted","Data":"bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed"}
Dec 09 16:17:02 crc kubenswrapper[4716]: I1209 16:17:02.623427 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pbbbv" podStartSLOduration=4.150602424 podStartE2EDuration="8.623404211s" podCreationTimestamp="2025-12-09 16:16:54 +0000 UTC" firstStartedPulling="2025-12-09 16:16:57.52294416 +0000 UTC m=+4104.677688148" lastFinishedPulling="2025-12-09 16:17:01.995745947 +0000 UTC m=+4109.150489935" observedRunningTime="2025-12-09 16:17:02.611729069 +0000 UTC m=+4109.766473057" watchObservedRunningTime="2025-12-09 16:17:02.623404211 +0000 UTC m=+4109.778148199"
Dec 09 16:17:04 crc kubenswrapper[4716]: I1209 16:17:04.214297 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"
Dec 09 16:17:04 crc kubenswrapper[4716]: E1209 16:17:04.215012 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:17:05 crc kubenswrapper[4716]: I1209 16:17:05.059297 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:17:05 crc kubenswrapper[4716]: I1209 16:17:05.059916 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:17:06 crc kubenswrapper[4716]: I1209 16:17:06.112322 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-pbbbv" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="registry-server" probeResult="failure" output=<
Dec 09 16:17:06 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s
Dec 09 16:17:06 crc kubenswrapper[4716]: >
Dec 09 16:17:07 crc kubenswrapper[4716]: I1209 16:17:07.646566 4716 generic.go:334] "Generic (PLEG): container finished" podID="3f192ee3-f23a-4d2f-b423-fee621bf273e" containerID="2343810a474a27e8bbabc3a21ef737002fadff4c0e20002d2bcc7ca35c814ad2" exitCode=2
Dec 09 16:17:07 crc kubenswrapper[4716]: I1209 16:17:07.646667 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" event={"ID":"3f192ee3-f23a-4d2f-b423-fee621bf273e","Type":"ContainerDied","Data":"2343810a474a27e8bbabc3a21ef737002fadff4c0e20002d2bcc7ca35c814ad2"}
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.106101 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn"
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.193795 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7xx2\" (UniqueName: \"kubernetes.io/projected/3f192ee3-f23a-4d2f-b423-fee621bf273e-kube-api-access-r7xx2\") pod \"3f192ee3-f23a-4d2f-b423-fee621bf273e\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") "
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.193915 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-inventory\") pod \"3f192ee3-f23a-4d2f-b423-fee621bf273e\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") "
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.194108 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-ssh-key\") pod \"3f192ee3-f23a-4d2f-b423-fee621bf273e\" (UID: \"3f192ee3-f23a-4d2f-b423-fee621bf273e\") "
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.669577 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn" event={"ID":"3f192ee3-f23a-4d2f-b423-fee621bf273e","Type":"ContainerDied","Data":"e7c80c64bc8d96eed34a0986230e682f49300616bda0434160522fee53c17eb8"}
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.669913 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7c80c64bc8d96eed34a0986230e682f49300616bda0434160522fee53c17eb8"
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.669656 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-85wnn"
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.805041 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f192ee3-f23a-4d2f-b423-fee621bf273e-kube-api-access-r7xx2" (OuterVolumeSpecName: "kube-api-access-r7xx2") pod "3f192ee3-f23a-4d2f-b423-fee621bf273e" (UID: "3f192ee3-f23a-4d2f-b423-fee621bf273e"). InnerVolumeSpecName "kube-api-access-r7xx2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.810130 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7xx2\" (UniqueName: \"kubernetes.io/projected/3f192ee3-f23a-4d2f-b423-fee621bf273e-kube-api-access-r7xx2\") on node \"crc\" DevicePath \"\""
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.834173 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-inventory" (OuterVolumeSpecName: "inventory") pod "3f192ee3-f23a-4d2f-b423-fee621bf273e" (UID: "3f192ee3-f23a-4d2f-b423-fee621bf273e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.835976 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3f192ee3-f23a-4d2f-b423-fee621bf273e" (UID: "3f192ee3-f23a-4d2f-b423-fee621bf273e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.912697 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 16:17:09 crc kubenswrapper[4716]: I1209 16:17:09.912768 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3f192ee3-f23a-4d2f-b423-fee621bf273e-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 16:17:13 crc kubenswrapper[4716]: E1209 16:17:13.216168 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:17:15 crc kubenswrapper[4716]: I1209 16:17:15.106679 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:17:15 crc kubenswrapper[4716]: I1209 16:17:15.167582 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pbbbv"
Dec 09 16:17:15 crc kubenswrapper[4716]: E1209 16:17:15.216081 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:17:15 crc kubenswrapper[4716]: I1209 16:17:15.349473 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pbbbv"]
Dec 09 16:17:16 crc kubenswrapper[4716]: I1209 16:17:16.213863 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32"
Dec 09 16:17:16 crc kubenswrapper[4716]: E1209 16:17:16.214602 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:17:16 crc kubenswrapper[4716]: I1209 16:17:16.740841 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pbbbv" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="registry-server" containerID="cri-o://bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed" gracePeriod=2
Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.224496 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pbbbv"
Need to start a new one" pod="openshift-marketplace/community-operators-pbbbv" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.411366 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfkvp\" (UniqueName: \"kubernetes.io/projected/e6d4422c-a376-46ec-84a7-12fd8b6e2372-kube-api-access-pfkvp\") pod \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.411653 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-catalog-content\") pod \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.411923 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-utilities\") pod \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\" (UID: \"e6d4422c-a376-46ec-84a7-12fd8b6e2372\") " Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.412523 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-utilities" (OuterVolumeSpecName: "utilities") pod "e6d4422c-a376-46ec-84a7-12fd8b6e2372" (UID: "e6d4422c-a376-46ec-84a7-12fd8b6e2372"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.412904 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.417662 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6d4422c-a376-46ec-84a7-12fd8b6e2372-kube-api-access-pfkvp" (OuterVolumeSpecName: "kube-api-access-pfkvp") pod "e6d4422c-a376-46ec-84a7-12fd8b6e2372" (UID: "e6d4422c-a376-46ec-84a7-12fd8b6e2372"). InnerVolumeSpecName "kube-api-access-pfkvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.461757 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e6d4422c-a376-46ec-84a7-12fd8b6e2372" (UID: "e6d4422c-a376-46ec-84a7-12fd8b6e2372"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.515137 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfkvp\" (UniqueName: \"kubernetes.io/projected/e6d4422c-a376-46ec-84a7-12fd8b6e2372-kube-api-access-pfkvp\") on node \"crc\" DevicePath \"\"" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.515172 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6d4422c-a376-46ec-84a7-12fd8b6e2372-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.754381 4716 generic.go:334] "Generic (PLEG): container finished" podID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerID="bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed" exitCode=0 Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.754454 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pbbbv" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.754456 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerDied","Data":"bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed"} Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.755000 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pbbbv" event={"ID":"e6d4422c-a376-46ec-84a7-12fd8b6e2372","Type":"ContainerDied","Data":"6183cdffbce462197475de09a275a8e9a1b20fad34711d23765104add085dc11"} Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.755043 4716 scope.go:117] "RemoveContainer" containerID="bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.794231 4716 scope.go:117] "RemoveContainer" containerID="5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.794485 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pbbbv"] Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.805290 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pbbbv"] Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.818515 4716 scope.go:117] "RemoveContainer" containerID="7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.870097 4716 scope.go:117] "RemoveContainer" containerID="bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed" Dec 09 16:17:17 crc kubenswrapper[4716]: E1209 16:17:17.870586 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed\": container with ID starting with bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed not found: ID does not exist" containerID="bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.870662 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed"} err="failed to get container status 
\"bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed\": rpc error: code = NotFound desc = could not find container \"bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed\": container with ID starting with bf3920fcead0324c20be57cf1ef3471832d853a85d13afac1e354f38ce5dc1ed not found: ID does not exist" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.870693 4716 scope.go:117] "RemoveContainer" containerID="5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f" Dec 09 16:17:17 crc kubenswrapper[4716]: E1209 16:17:17.871378 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f\": container with ID starting with 5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f not found: ID does not exist" containerID="5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.871412 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f"} err="failed to get container status \"5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f\": rpc error: code = NotFound desc = could not find container \"5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f\": container with ID starting with 5df4e417ea122624533f8ad39c394cf01467a3d38039af768a14e354a9bb388f not found: ID does not exist" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.871432 4716 scope.go:117] "RemoveContainer" containerID="7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48" Dec 09 16:17:17 crc kubenswrapper[4716]: E1209 16:17:17.871752 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48\": container with ID starting with 7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48 not found: ID does not exist" containerID="7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48" Dec 09 16:17:17 crc kubenswrapper[4716]: I1209 16:17:17.871851 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48"} err="failed to get container status \"7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48\": rpc error: code = NotFound desc = could not find container \"7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48\": container with ID starting with 7b9c366d831a932fe3c653e13c78c4b882ad83bb03b7b0ba244ebe46aadb1d48 not found: ID does not exist" Dec 09 16:17:19 crc kubenswrapper[4716]: I1209 16:17:19.231425 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" path="/var/lib/kubelet/pods/e6d4422c-a376-46ec-84a7-12fd8b6e2372/volumes" Dec 09 16:17:26 crc kubenswrapper[4716]: E1209 16:17:26.215883 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:17:30 crc kubenswrapper[4716]: E1209 16:17:30.216794 4716 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:17:31 crc kubenswrapper[4716]: I1209 16:17:31.219597 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:17:31 crc kubenswrapper[4716]: E1209 16:17:31.220213 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:17:37 crc kubenswrapper[4716]: E1209 16:17:37.216874 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:17:43 crc kubenswrapper[4716]: E1209 16:17:43.222961 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:17:45 crc kubenswrapper[4716]: I1209 16:17:45.214078 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:17:45 crc kubenswrapper[4716]: E1209 16:17:45.214733 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:17:52 crc kubenswrapper[4716]: E1209 16:17:52.215974 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:17:56 crc kubenswrapper[4716]: E1209 16:17:56.216577 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:17:58 crc kubenswrapper[4716]: I1209 16:17:58.214144 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:17:59 crc kubenswrapper[4716]: I1209 16:17:59.205903 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"454f22297659a966d82841c395a4cbf6527133c3c74bf24a3ec30712697930b2"} Dec 09 16:18:06 crc kubenswrapper[4716]: E1209 16:18:06.216285 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:18:08 crc kubenswrapper[4716]: E1209 16:18:08.217668 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:18:21 crc kubenswrapper[4716]: E1209 16:18:21.432073 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:18:22 crc kubenswrapper[4716]: E1209 16:18:22.216812 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:18:36 crc kubenswrapper[4716]: E1209 16:18:36.216099 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:18:36 crc kubenswrapper[4716]: E1209 16:18:36.216183 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:18:49 crc kubenswrapper[4716]: I1209 16:18:49.219220 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.581658 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.581750 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.581920 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.582985 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.695788 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.696420 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.696721 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:18:49 crc kubenswrapper[4716]: E1209 16:18:49.699650 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:19:04 crc kubenswrapper[4716]: E1209 16:19:04.219879 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:19:05 crc kubenswrapper[4716]: E1209 16:19:05.216313 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:19:18 crc kubenswrapper[4716]: E1209 16:19:18.216614 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:19:19 crc kubenswrapper[4716]: E1209 16:19:19.215680 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:19:30 crc kubenswrapper[4716]: E1209 16:19:30.217297 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:19:31 crc kubenswrapper[4716]: E1209 16:19:31.216246 4716 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:19:43 crc kubenswrapper[4716]: E1209 16:19:43.224984 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:19:44 crc kubenswrapper[4716]: E1209 16:19:44.216446 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.038919 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc"] Dec 09 16:19:46 crc kubenswrapper[4716]: E1209 16:19:46.040011 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="registry-server" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.040040 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="registry-server" Dec 09 16:19:46 crc kubenswrapper[4716]: E1209 16:19:46.040113 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f192ee3-f23a-4d2f-b423-fee621bf273e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.040126 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f192ee3-f23a-4d2f-b423-fee621bf273e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:19:46 crc kubenswrapper[4716]: E1209 16:19:46.040148 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="extract-content" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.040157 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="extract-content" Dec 09 16:19:46 crc kubenswrapper[4716]: E1209 16:19:46.040169 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="extract-utilities" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.040177 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="extract-utilities" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.040574 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6d4422c-a376-46ec-84a7-12fd8b6e2372" containerName="registry-server" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.040613 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f192ee3-f23a-4d2f-b423-fee621bf273e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.041866 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.045209 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.045307 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.045466 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.045547 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.054587 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc"] Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.146896 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.147341 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcvct\" (UniqueName: \"kubernetes.io/projected/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-kube-api-access-gcvct\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.147481 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.250065 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcvct\" (UniqueName: \"kubernetes.io/projected/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-kube-api-access-gcvct\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.250133 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.250244 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.256804 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.257051 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.267425 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcvct\" (UniqueName: \"kubernetes.io/projected/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-kube-api-access-gcvct\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.376021 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:19:46 crc kubenswrapper[4716]: W1209 16:19:46.946047 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff34b1b8_b04f_4c11_8e8b_ac80b98a6f9e.slice/crio-db3817d8aeefcae605ec066f17603b14d4bfbd58d313d061eb2386778559310d WatchSource:0}: Error finding container db3817d8aeefcae605ec066f17603b14d4bfbd58d313d061eb2386778559310d: Status 404 returned error can't find the container with id db3817d8aeefcae605ec066f17603b14d4bfbd58d313d061eb2386778559310d Dec 09 16:19:46 crc kubenswrapper[4716]: I1209 16:19:46.946777 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc"] Dec 09 16:19:47 crc kubenswrapper[4716]: I1209 16:19:47.700304 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" event={"ID":"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e","Type":"ContainerStarted","Data":"34d3478cfae69706b08b7fc11aefcdbd67522b87f341976c1b15359df2a196b0"} Dec 09 16:19:47 crc kubenswrapper[4716]: I1209 16:19:47.700777 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" event={"ID":"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e","Type":"ContainerStarted","Data":"db3817d8aeefcae605ec066f17603b14d4bfbd58d313d061eb2386778559310d"} Dec 09 16:19:47 crc kubenswrapper[4716]: I1209 16:19:47.723754 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" podStartSLOduration=1.2549433269999999 podStartE2EDuration="1.723720192s" podCreationTimestamp="2025-12-09 16:19:46 +0000 UTC" firstStartedPulling="2025-12-09 16:19:46.948753687 +0000 UTC m=+4274.103497675" 
lastFinishedPulling="2025-12-09 16:19:47.417530552 +0000 UTC m=+4274.572274540" observedRunningTime="2025-12-09 16:19:47.714821049 +0000 UTC m=+4274.869565037" watchObservedRunningTime="2025-12-09 16:19:47.723720192 +0000 UTC m=+4274.878464180" Dec 09 16:19:58 crc kubenswrapper[4716]: E1209 16:19:58.216494 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:19:59 crc kubenswrapper[4716]: E1209 16:19:59.215923 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:20:11 crc kubenswrapper[4716]: E1209 16:20:11.215716 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:20:12 crc kubenswrapper[4716]: E1209 16:20:12.216164 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:20:17 crc kubenswrapper[4716]: I1209 16:20:17.922631 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:20:17 crc kubenswrapper[4716]: I1209 16:20:17.923187 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:20:22 crc kubenswrapper[4716]: E1209 16:20:22.216544 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:20:24 crc kubenswrapper[4716]: E1209 16:20:24.219010 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:20:36 crc kubenswrapper[4716]: E1209 16:20:36.217586 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:20:37 crc kubenswrapper[4716]: E1209 16:20:37.215984 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:20:47 crc kubenswrapper[4716]: I1209 16:20:47.921752 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:20:47 crc kubenswrapper[4716]: I1209 16:20:47.922272 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:20:49 crc kubenswrapper[4716]: E1209 16:20:49.216286 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:20:52 crc kubenswrapper[4716]: E1209 16:20:52.216700 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:20:56 crc kubenswrapper[4716]: I1209 16:20:56.920259 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-778pc"] Dec 09 16:20:56 crc kubenswrapper[4716]: I1209 16:20:56.923824 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:56 crc kubenswrapper[4716]: I1209 16:20:56.940127 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-778pc"] Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.056534 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-utilities\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.056614 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nxrg\" (UniqueName: \"kubernetes.io/projected/2f4f9bcc-f406-4d55-98e7-30522091fb21-kube-api-access-7nxrg\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.056670 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-catalog-content\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.159075 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-utilities\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.159169 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nxrg\" (UniqueName: \"kubernetes.io/projected/2f4f9bcc-f406-4d55-98e7-30522091fb21-kube-api-access-7nxrg\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.159220 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-catalog-content\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.160277 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-catalog-content\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.162748 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-utilities\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.180602 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7nxrg\" (UniqueName: \"kubernetes.io/projected/2f4f9bcc-f406-4d55-98e7-30522091fb21-kube-api-access-7nxrg\") pod \"redhat-operators-778pc\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.250027 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:20:57 crc kubenswrapper[4716]: I1209 16:20:57.758898 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-778pc"] Dec 09 16:20:58 crc kubenswrapper[4716]: I1209 16:20:58.496077 4716 generic.go:334] "Generic (PLEG): container finished" podID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerID="2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3" exitCode=0 Dec 09 16:20:58 crc kubenswrapper[4716]: I1209 16:20:58.496165 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerDied","Data":"2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3"} Dec 09 16:20:58 crc kubenswrapper[4716]: I1209 16:20:58.496442 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerStarted","Data":"0f265930205c14e90fc714a2c15712dd65a207da5ce48ed8f6e6a35c688ced31"} Dec 09 16:21:00 crc kubenswrapper[4716]: I1209 16:21:00.518412 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerStarted","Data":"68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86"} Dec 09 16:21:02 crc kubenswrapper[4716]: I1209 16:21:02.540542 4716 generic.go:334] "Generic (PLEG): container finished" podID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerID="68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86" exitCode=0 Dec 09 16:21:02 crc kubenswrapper[4716]: I1209 16:21:02.540650 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerDied","Data":"68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86"} Dec 09 16:21:03 crc kubenswrapper[4716]: E1209 16:21:03.227084 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:21:03 crc kubenswrapper[4716]: I1209 16:21:03.557041 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerStarted","Data":"46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22"} Dec 09 16:21:03 crc kubenswrapper[4716]: I1209 16:21:03.585831 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-778pc" podStartSLOduration=2.924883618 podStartE2EDuration="7.585808006s" podCreationTimestamp="2025-12-09 16:20:56 +0000 UTC" firstStartedPulling="2025-12-09 16:20:58.498023335 +0000 UTC m=+4345.652767323" 
lastFinishedPulling="2025-12-09 16:21:03.158947723 +0000 UTC m=+4350.313691711" observedRunningTime="2025-12-09 16:21:03.578182829 +0000 UTC m=+4350.732926817" watchObservedRunningTime="2025-12-09 16:21:03.585808006 +0000 UTC m=+4350.740551994" Dec 09 16:21:07 crc kubenswrapper[4716]: E1209 16:21:07.216176 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:21:07 crc kubenswrapper[4716]: I1209 16:21:07.250935 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:21:07 crc kubenswrapper[4716]: I1209 16:21:07.251559 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:21:08 crc kubenswrapper[4716]: I1209 16:21:08.305483 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-778pc" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="registry-server" probeResult="failure" output=< Dec 09 16:21:08 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 16:21:08 crc kubenswrapper[4716]: > Dec 09 16:21:14 crc kubenswrapper[4716]: E1209 16:21:14.215748 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.301631 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.355672 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.543768 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-778pc"] Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.922122 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.922534 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.922608 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.923855 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"454f22297659a966d82841c395a4cbf6527133c3c74bf24a3ec30712697930b2"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:21:17 crc kubenswrapper[4716]: I1209 16:21:17.923991 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://454f22297659a966d82841c395a4cbf6527133c3c74bf24a3ec30712697930b2" gracePeriod=600 Dec 09 16:21:18 crc kubenswrapper[4716]: I1209 16:21:18.719209 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="454f22297659a966d82841c395a4cbf6527133c3c74bf24a3ec30712697930b2" exitCode=0 Dec 09 16:21:18 crc kubenswrapper[4716]: I1209 16:21:18.719779 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-778pc" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="registry-server" containerID="cri-o://46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22" gracePeriod=2 Dec 09 16:21:18 crc kubenswrapper[4716]: I1209 16:21:18.720108 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"454f22297659a966d82841c395a4cbf6527133c3c74bf24a3ec30712697930b2"} Dec 09 16:21:18 crc kubenswrapper[4716]: I1209 16:21:18.720144 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"} Dec 09 16:21:18 crc kubenswrapper[4716]: I1209 16:21:18.720177 4716 scope.go:117] "RemoveContainer" containerID="490eaaf1f376369148e173c56855daddfef606020fdb646064723ff6596a6f32" Dec 09 16:21:19 crc kubenswrapper[4716]: E1209 16:21:19.224995 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.343304 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.474933 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-catalog-content\") pod \"2f4f9bcc-f406-4d55-98e7-30522091fb21\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.475082 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nxrg\" (UniqueName: \"kubernetes.io/projected/2f4f9bcc-f406-4d55-98e7-30522091fb21-kube-api-access-7nxrg\") pod \"2f4f9bcc-f406-4d55-98e7-30522091fb21\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.475175 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-utilities\") pod \"2f4f9bcc-f406-4d55-98e7-30522091fb21\" (UID: \"2f4f9bcc-f406-4d55-98e7-30522091fb21\") " Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.475877 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-utilities" (OuterVolumeSpecName: "utilities") pod "2f4f9bcc-f406-4d55-98e7-30522091fb21" (UID: "2f4f9bcc-f406-4d55-98e7-30522091fb21"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.481987 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f4f9bcc-f406-4d55-98e7-30522091fb21-kube-api-access-7nxrg" (OuterVolumeSpecName: "kube-api-access-7nxrg") pod "2f4f9bcc-f406-4d55-98e7-30522091fb21" (UID: "2f4f9bcc-f406-4d55-98e7-30522091fb21"). InnerVolumeSpecName "kube-api-access-7nxrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.578095 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.578147 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nxrg\" (UniqueName: \"kubernetes.io/projected/2f4f9bcc-f406-4d55-98e7-30522091fb21-kube-api-access-7nxrg\") on node \"crc\" DevicePath \"\"" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.580741 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f4f9bcc-f406-4d55-98e7-30522091fb21" (UID: "2f4f9bcc-f406-4d55-98e7-30522091fb21"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.680024 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f4f9bcc-f406-4d55-98e7-30522091fb21-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.734715 4716 generic.go:334] "Generic (PLEG): container finished" podID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerID="46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22" exitCode=0 Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.734760 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerDied","Data":"46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22"} Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.734788 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-778pc" event={"ID":"2f4f9bcc-f406-4d55-98e7-30522091fb21","Type":"ContainerDied","Data":"0f265930205c14e90fc714a2c15712dd65a207da5ce48ed8f6e6a35c688ced31"} Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.734808 4716 scope.go:117] "RemoveContainer" containerID="46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.734948 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-778pc" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.798480 4716 scope.go:117] "RemoveContainer" containerID="68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.803789 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-778pc"] Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.816590 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-778pc"] Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.827438 4716 scope.go:117] "RemoveContainer" containerID="2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.897777 4716 scope.go:117] "RemoveContainer" containerID="46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22" Dec 09 16:21:19 crc kubenswrapper[4716]: E1209 16:21:19.900304 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22\": container with ID starting with 46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22 not found: ID does not exist" containerID="46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.900347 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22"} err="failed to get container status \"46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22\": rpc error: code = NotFound desc = could not find container \"46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22\": container with ID starting with 46158d254518f305bca04234c8c37dedfe90aadd44e57ec93878adbc7de3bb22 not found: ID does not exist" Dec 09 16:21:19 crc 
kubenswrapper[4716]: I1209 16:21:19.900370 4716 scope.go:117] "RemoveContainer" containerID="68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86" Dec 09 16:21:19 crc kubenswrapper[4716]: E1209 16:21:19.900896 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86\": container with ID starting with 68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86 not found: ID does not exist" containerID="68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.900943 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86"} err="failed to get container status \"68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86\": rpc error: code = NotFound desc = could not find container \"68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86\": container with ID starting with 68a4f3d782e608b621558abf12f3311a5858bfd97ca20a55c7bbfb6eeb4e7e86 not found: ID does not exist" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.900976 4716 scope.go:117] "RemoveContainer" containerID="2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3" Dec 09 16:21:19 crc kubenswrapper[4716]: E1209 16:21:19.902556 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3\": container with ID starting with 2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3 not found: ID does not exist" containerID="2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3" Dec 09 16:21:19 crc kubenswrapper[4716]: I1209 16:21:19.902583 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3"} err="failed to get container status \"2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3\": rpc error: code = NotFound desc = could not find container \"2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3\": container with ID starting with 2dd22e6df7d50d4dab8496bbfa6bb784b9dc2e1e574a6e94406ccae431c12bb3 not found: ID does not exist" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.232334 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" path="/var/lib/kubelet/pods/2f4f9bcc-f406-4d55-98e7-30522091fb21/volumes" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.949550 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8hj6t"] Dec 09 16:21:21 crc kubenswrapper[4716]: E1209 16:21:21.950414 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="extract-utilities" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.950435 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="extract-utilities" Dec 09 16:21:21 crc kubenswrapper[4716]: E1209 16:21:21.950460 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="extract-content" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.950466 4716 
state_mem.go:107] "Deleted CPUSet assignment" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="extract-content" Dec 09 16:21:21 crc kubenswrapper[4716]: E1209 16:21:21.950506 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="registry-server" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.950512 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="registry-server" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.950797 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f4f9bcc-f406-4d55-98e7-30522091fb21" containerName="registry-server" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.952904 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:21 crc kubenswrapper[4716]: I1209 16:21:21.965062 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8hj6t"] Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.001991 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83189221-c6eb-471f-b842-49eeafd93e8e-utilities\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.002045 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83189221-c6eb-471f-b842-49eeafd93e8e-catalog-content\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.002345 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tqjk\" (UniqueName: \"kubernetes.io/projected/83189221-c6eb-471f-b842-49eeafd93e8e-kube-api-access-8tqjk\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.105527 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83189221-c6eb-471f-b842-49eeafd93e8e-utilities\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.106099 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83189221-c6eb-471f-b842-49eeafd93e8e-catalog-content\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.106043 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83189221-c6eb-471f-b842-49eeafd93e8e-utilities\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 
16:21:22.106343 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83189221-c6eb-471f-b842-49eeafd93e8e-catalog-content\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.106529 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tqjk\" (UniqueName: \"kubernetes.io/projected/83189221-c6eb-471f-b842-49eeafd93e8e-kube-api-access-8tqjk\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.125361 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tqjk\" (UniqueName: \"kubernetes.io/projected/83189221-c6eb-471f-b842-49eeafd93e8e-kube-api-access-8tqjk\") pod \"certified-operators-8hj6t\" (UID: \"83189221-c6eb-471f-b842-49eeafd93e8e\") " pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.279736 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:22 crc kubenswrapper[4716]: W1209 16:21:22.855522 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83189221_c6eb_471f_b842_49eeafd93e8e.slice/crio-6ea84341d1653281e2eb56168e089bcdb943115057414a4687ac66ad0c59bf60 WatchSource:0}: Error finding container 6ea84341d1653281e2eb56168e089bcdb943115057414a4687ac66ad0c59bf60: Status 404 returned error can't find the container with id 6ea84341d1653281e2eb56168e089bcdb943115057414a4687ac66ad0c59bf60 Dec 09 16:21:22 crc kubenswrapper[4716]: I1209 16:21:22.860298 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8hj6t"] Dec 09 16:21:23 crc kubenswrapper[4716]: I1209 16:21:23.781608 4716 generic.go:334] "Generic (PLEG): container finished" podID="83189221-c6eb-471f-b842-49eeafd93e8e" containerID="0e6c3221feb69d0edeedb00a00ad582884733e34eb125b042798a12426d1c47e" exitCode=0 Dec 09 16:21:23 crc kubenswrapper[4716]: I1209 16:21:23.781730 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8hj6t" event={"ID":"83189221-c6eb-471f-b842-49eeafd93e8e","Type":"ContainerDied","Data":"0e6c3221feb69d0edeedb00a00ad582884733e34eb125b042798a12426d1c47e"} Dec 09 16:21:23 crc kubenswrapper[4716]: I1209 16:21:23.781963 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8hj6t" event={"ID":"83189221-c6eb-471f-b842-49eeafd93e8e","Type":"ContainerStarted","Data":"6ea84341d1653281e2eb56168e089bcdb943115057414a4687ac66ad0c59bf60"} Dec 09 16:21:29 crc kubenswrapper[4716]: E1209 16:21:29.223126 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:21:29 crc kubenswrapper[4716]: I1209 16:21:29.863300 4716 generic.go:334] "Generic (PLEG): container finished" podID="83189221-c6eb-471f-b842-49eeafd93e8e" 
containerID="2ed94cd4c4cf0a89869070279bcbc5bfa5e137eed0076ce4563700bad1fd43df" exitCode=0 Dec 09 16:21:29 crc kubenswrapper[4716]: I1209 16:21:29.863430 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8hj6t" event={"ID":"83189221-c6eb-471f-b842-49eeafd93e8e","Type":"ContainerDied","Data":"2ed94cd4c4cf0a89869070279bcbc5bfa5e137eed0076ce4563700bad1fd43df"} Dec 09 16:21:30 crc kubenswrapper[4716]: E1209 16:21:30.216975 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:21:30 crc kubenswrapper[4716]: I1209 16:21:30.879178 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8hj6t" event={"ID":"83189221-c6eb-471f-b842-49eeafd93e8e","Type":"ContainerStarted","Data":"55e766f301a9c9163958071c744408aa6f711f739a764390e72bf37dd911a153"} Dec 09 16:21:30 crc kubenswrapper[4716]: I1209 16:21:30.953510 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8hj6t" podStartSLOduration=3.29644488 podStartE2EDuration="9.953485271s" podCreationTimestamp="2025-12-09 16:21:21 +0000 UTC" firstStartedPulling="2025-12-09 16:21:23.784393385 +0000 UTC m=+4370.939137373" lastFinishedPulling="2025-12-09 16:21:30.441433776 +0000 UTC m=+4377.596177764" observedRunningTime="2025-12-09 16:21:30.94431098 +0000 UTC m=+4378.099054978" watchObservedRunningTime="2025-12-09 16:21:30.953485271 +0000 UTC m=+4378.108229259" Dec 09 16:21:32 crc kubenswrapper[4716]: I1209 16:21:32.280095 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:32 crc kubenswrapper[4716]: I1209 16:21:32.280479 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:32 crc kubenswrapper[4716]: I1209 16:21:32.332638 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:42 crc kubenswrapper[4716]: E1209 16:21:42.268588 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:21:44 crc kubenswrapper[4716]: E1209 16:21:44.216388 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:21:56 crc kubenswrapper[4716]: E1209 16:21:56.216640 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 
16:21:57 crc kubenswrapper[4716]: I1209 16:21:57.208922 4716 trace.go:236] Trace[1270877333]: "Calculate volume metrics of registry-storage for pod openshift-image-registry/image-registry-66df7c8f76-hgf8l" (09-Dec-2025 16:21:49.901) (total time: 7307ms): Dec 09 16:21:57 crc kubenswrapper[4716]: Trace[1270877333]: [7.307646888s] [7.307646888s] END Dec 09 16:21:57 crc kubenswrapper[4716]: I1209 16:21:57.229337 4716 trace.go:236] Trace[2089153154]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-compactor-0" (09-Dec-2025 16:21:40.410) (total time: 16818ms): Dec 09 16:21:57 crc kubenswrapper[4716]: Trace[2089153154]: [16.818787326s] [16.818787326s] END Dec 09 16:21:57 crc kubenswrapper[4716]: I1209 16:21:57.530026 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8hj6t" Dec 09 16:21:57 crc kubenswrapper[4716]: I1209 16:21:57.659182 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8hj6t"] Dec 09 16:21:57 crc kubenswrapper[4716]: I1209 16:21:57.705980 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:21:57 crc kubenswrapper[4716]: I1209 16:21:57.706253 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tkb4s" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="registry-server" containerID="cri-o://a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" gracePeriod=2 Dec 09 16:21:57 crc kubenswrapper[4716]: E1209 16:21:57.754481 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f is running failed: container process not found" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 16:21:57 crc kubenswrapper[4716]: E1209 16:21:57.756931 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f is running failed: container process not found" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 16:21:57 crc kubenswrapper[4716]: E1209 16:21:57.757326 4716 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f is running failed: container process not found" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" cmd=["grpc_health_probe","-addr=:50051"] Dec 09 16:21:57 crc kubenswrapper[4716]: E1209 16:21:57.757375 4716 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-tkb4s" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="registry-server" Dec 09 16:21:58 crc kubenswrapper[4716]: E1209 16:21:58.223387 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.325839 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.451743 4716 generic.go:334] "Generic (PLEG): container finished" podID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" exitCode=0 Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.451823 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tkb4s" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.451796 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerDied","Data":"a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f"} Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.452213 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tkb4s" event={"ID":"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade","Type":"ContainerDied","Data":"7c61a52e4c20737d5984fdc0657d10eac034be0fe3388e168fcc7b2a774caca1"} Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.452288 4716 scope.go:117] "RemoveContainer" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.481398 4716 scope.go:117] "RemoveContainer" containerID="dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.483214 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-catalog-content\") pod \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.483301 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-utilities\") pod \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.483431 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6cwp\" (UniqueName: \"kubernetes.io/projected/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-kube-api-access-g6cwp\") pod \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\" (UID: \"d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade\") " Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.485132 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-utilities" (OuterVolumeSpecName: "utilities") pod "d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" (UID: "d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.506611 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-kube-api-access-g6cwp" (OuterVolumeSpecName: "kube-api-access-g6cwp") pod "d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" (UID: "d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade"). InnerVolumeSpecName "kube-api-access-g6cwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.563011 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" (UID: "d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.586640 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.586680 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.586692 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6cwp\" (UniqueName: \"kubernetes.io/projected/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade-kube-api-access-g6cwp\") on node \"crc\" DevicePath \"\"" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.593332 4716 scope.go:117] "RemoveContainer" containerID="8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.640338 4716 scope.go:117] "RemoveContainer" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" Dec 09 16:21:58 crc kubenswrapper[4716]: E1209 16:21:58.640980 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f\": container with ID starting with a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f not found: ID does not exist" containerID="a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.641034 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f"} err="failed to get container status \"a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f\": rpc error: code = NotFound desc = could not find container \"a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f\": container with ID starting with a2e1c878c5ab307ddf6168d7ea5286ae016e2c61ede8be176b58984dbf25761f not found: ID does not exist" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.641074 4716 scope.go:117] "RemoveContainer" containerID="dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382" Dec 09 16:21:58 crc kubenswrapper[4716]: E1209 16:21:58.641329 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find 
container \"dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382\": container with ID starting with dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382 not found: ID does not exist" containerID="dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.641358 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382"} err="failed to get container status \"dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382\": rpc error: code = NotFound desc = could not find container \"dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382\": container with ID starting with dc5d0197ab62bf9bcf8c7dc0ed8726b8f8ba7dc7a18702a239547f1dc0b71382 not found: ID does not exist" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.641375 4716 scope.go:117] "RemoveContainer" containerID="8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963" Dec 09 16:21:58 crc kubenswrapper[4716]: E1209 16:21:58.641660 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963\": container with ID starting with 8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963 not found: ID does not exist" containerID="8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.641682 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963"} err="failed to get container status \"8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963\": rpc error: code = NotFound desc = could not find container \"8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963\": container with ID starting with 8e687ac9ea7155ec75f2c23cf17da5ef550f38d1804d6ca581d572ce89b57963 not found: ID does not exist" Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.801203 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:21:58 crc kubenswrapper[4716]: I1209 16:21:58.815056 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tkb4s"] Dec 09 16:21:59 crc kubenswrapper[4716]: I1209 16:21:59.230083 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" path="/var/lib/kubelet/pods/d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade/volumes" Dec 09 16:22:09 crc kubenswrapper[4716]: E1209 16:22:09.216479 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:22:09 crc kubenswrapper[4716]: E1209 16:22:09.216789 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:22:20 crc 
kubenswrapper[4716]: E1209 16:22:20.217694 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:22:21 crc kubenswrapper[4716]: E1209 16:22:21.216308 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:22:33 crc kubenswrapper[4716]: E1209 16:22:33.226588 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:22:35 crc kubenswrapper[4716]: E1209 16:22:35.217442 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:22:48 crc kubenswrapper[4716]: E1209 16:22:48.215888 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:22:48 crc kubenswrapper[4716]: E1209 16:22:48.216013 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:22:59 crc kubenswrapper[4716]: E1209 16:22:59.216166 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:23:01 crc kubenswrapper[4716]: E1209 16:23:01.216457 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:23:10 crc kubenswrapper[4716]: E1209 16:23:10.217140 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" 
podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:23:12 crc kubenswrapper[4716]: E1209 16:23:12.216525 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:23:23 crc kubenswrapper[4716]: E1209 16:23:23.222617 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:23:26 crc kubenswrapper[4716]: E1209 16:23:26.216775 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:23:34 crc kubenswrapper[4716]: E1209 16:23:34.215775 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:23:37 crc kubenswrapper[4716]: E1209 16:23:37.216846 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:23:47 crc kubenswrapper[4716]: E1209 16:23:47.217458 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:23:47 crc kubenswrapper[4716]: I1209 16:23:47.922570 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:23:47 crc kubenswrapper[4716]: I1209 16:23:47.922971 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:23:49 crc kubenswrapper[4716]: E1209 16:23:49.217830 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:24:00 crc kubenswrapper[4716]: I1209 16:24:00.216002 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 16:24:00 crc kubenswrapper[4716]: E1209 16:24:00.342951 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:24:00 crc kubenswrapper[4716]: E1209 16:24:00.343049 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:24:00 crc kubenswrapper[4716]: E1209 16:24:00.343198 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing 
source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:24:00 crc kubenswrapper[4716]: E1209 16:24:00.344412 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:24:03 crc kubenswrapper[4716]: E1209 16:24:03.344014 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:24:03 crc kubenswrapper[4716]: E1209 16:24:03.344592 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:24:03 crc kubenswrapper[4716]: E1209 16:24:03.344757 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:24:03 crc kubenswrapper[4716]: E1209 16:24:03.346232 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:24:15 crc kubenswrapper[4716]: E1209 16:24:15.216341 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:24:17 crc kubenswrapper[4716]: E1209 16:24:17.221748 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:24:17 crc kubenswrapper[4716]: I1209 16:24:17.922339 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:24:17 crc kubenswrapper[4716]: I1209 16:24:17.922704 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:24:28 crc kubenswrapper[4716]: E1209 16:24:28.215942 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:24:30 crc kubenswrapper[4716]: E1209 16:24:30.215674 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:24:39 crc kubenswrapper[4716]: E1209 16:24:39.216849 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:24:41 crc kubenswrapper[4716]: E1209 16:24:41.216348 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:24:47 crc kubenswrapper[4716]: I1209 16:24:47.921899 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:24:47 crc kubenswrapper[4716]: I1209 16:24:47.923468 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:24:47 crc kubenswrapper[4716]: I1209 16:24:47.923587 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:24:47 crc kubenswrapper[4716]: I1209 16:24:47.924607 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:24:47 crc kubenswrapper[4716]: I1209 16:24:47.924789 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" gracePeriod=600 Dec 09 16:24:48 crc kubenswrapper[4716]: E1209 16:24:48.050973 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:24:48 crc kubenswrapper[4716]: I1209 16:24:48.229202 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" exitCode=0 Dec 09 16:24:48 crc kubenswrapper[4716]: I1209 16:24:48.229257 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"} Dec 09 16:24:48 crc kubenswrapper[4716]: I1209 16:24:48.229328 4716 scope.go:117] "RemoveContainer" containerID="454f22297659a966d82841c395a4cbf6527133c3c74bf24a3ec30712697930b2" Dec 09 16:24:48 crc kubenswrapper[4716]: I1209 16:24:48.232296 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:24:48 crc kubenswrapper[4716]: E1209 16:24:48.235361 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:24:50 crc kubenswrapper[4716]: E1209 16:24:50.216458 4716 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:24:55 crc kubenswrapper[4716]: E1209 16:24:55.216235 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:25:02 crc kubenswrapper[4716]: E1209 16:25:02.472593 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:25:03 crc kubenswrapper[4716]: I1209 16:25:03.232365 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:25:03 crc kubenswrapper[4716]: E1209 16:25:03.233331 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:25:08 crc kubenswrapper[4716]: E1209 16:25:08.216731 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:25:14 crc kubenswrapper[4716]: E1209 16:25:14.217882 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:25:18 crc kubenswrapper[4716]: I1209 16:25:18.213355 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:25:18 crc kubenswrapper[4716]: E1209 16:25:18.214260 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:25:19 crc kubenswrapper[4716]: E1209 16:25:19.216612 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:25:28 crc kubenswrapper[4716]: E1209 16:25:28.215951 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:25:29 crc kubenswrapper[4716]: I1209 16:25:29.214532 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:25:29 crc kubenswrapper[4716]: E1209 16:25:29.215217 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:25:32 crc kubenswrapper[4716]: E1209 16:25:32.216413 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:25:41 crc kubenswrapper[4716]: I1209 16:25:41.214920 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:25:41 crc kubenswrapper[4716]: E1209 16:25:41.215954 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:25:41 crc kubenswrapper[4716]: E1209 16:25:41.216143 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:25:46 crc kubenswrapper[4716]: E1209 16:25:46.218119 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:25:52 crc kubenswrapper[4716]: I1209 16:25:52.213581 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:25:52 crc kubenswrapper[4716]: E1209 16:25:52.214495 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:25:56 crc kubenswrapper[4716]: E1209 16:25:56.216470 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:26:00 crc kubenswrapper[4716]: E1209 16:26:00.215290 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:26:04 crc kubenswrapper[4716]: I1209 16:26:04.214815 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:26:04 crc kubenswrapper[4716]: E1209 16:26:04.215762 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:26:08 crc kubenswrapper[4716]: E1209 16:26:08.216306 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:26:11 crc kubenswrapper[4716]: E1209 16:26:11.216243 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:26:15 crc kubenswrapper[4716]: I1209 16:26:15.213692 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:26:15 crc kubenswrapper[4716]: E1209 16:26:15.214547 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:26:20 crc kubenswrapper[4716]: E1209 16:26:20.217217 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:26:20 crc kubenswrapper[4716]: I1209 16:26:20.898866 4716 generic.go:334] "Generic (PLEG): container finished" podID="ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" containerID="34d3478cfae69706b08b7fc11aefcdbd67522b87f341976c1b15359df2a196b0" exitCode=2 Dec 09 16:26:20 crc kubenswrapper[4716]: I1209 16:26:20.898923 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" event={"ID":"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e","Type":"ContainerDied","Data":"34d3478cfae69706b08b7fc11aefcdbd67522b87f341976c1b15359df2a196b0"} Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.479719 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.589781 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-ssh-key\") pod \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.590189 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcvct\" (UniqueName: \"kubernetes.io/projected/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-kube-api-access-gcvct\") pod \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.590249 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-inventory\") pod \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\" (UID: \"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e\") " Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.595835 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-kube-api-access-gcvct" (OuterVolumeSpecName: "kube-api-access-gcvct") pod "ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" (UID: "ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e"). InnerVolumeSpecName "kube-api-access-gcvct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.623677 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-inventory" (OuterVolumeSpecName: "inventory") pod "ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" (UID: "ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.624319 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" (UID: "ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.693486 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.693528 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcvct\" (UniqueName: \"kubernetes.io/projected/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-kube-api-access-gcvct\") on node \"crc\" DevicePath \"\"" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.693541 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e-inventory\") on node \"crc\" DevicePath \"\"" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.928177 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" event={"ID":"ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e","Type":"ContainerDied","Data":"db3817d8aeefcae605ec066f17603b14d4bfbd58d313d061eb2386778559310d"} Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.928230 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db3817d8aeefcae605ec066f17603b14d4bfbd58d313d061eb2386778559310d" Dec 09 16:26:22 crc kubenswrapper[4716]: I1209 16:26:22.928259 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc" Dec 09 16:26:25 crc kubenswrapper[4716]: E1209 16:26:25.216333 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:26:27 crc kubenswrapper[4716]: I1209 16:26:27.214063 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:26:27 crc kubenswrapper[4716]: E1209 16:26:27.215009 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:26:34 crc kubenswrapper[4716]: E1209 16:26:34.215978 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.362832 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4mlfx"] Dec 09 16:26:37 crc kubenswrapper[4716]: E1209 16:26:37.363932 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="extract-utilities" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.363953 4716 
state_mem.go:107] "Deleted CPUSet assignment" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="extract-utilities" Dec 09 16:26:37 crc kubenswrapper[4716]: E1209 16:26:37.364009 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.364017 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:26:37 crc kubenswrapper[4716]: E1209 16:26:37.364036 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="registry-server" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.364042 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="registry-server" Dec 09 16:26:37 crc kubenswrapper[4716]: E1209 16:26:37.364057 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="extract-content" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.364063 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="extract-content" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.364947 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.365026 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4f22f62-f1b3-4fbf-b6eb-f7a04af28ade" containerName="registry-server" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.367032 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.382883 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mlfx"] Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.460140 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jls66\" (UniqueName: \"kubernetes.io/projected/4e710769-f903-4325-9eef-931e48e774d8-kube-api-access-jls66\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.460302 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-utilities\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.460343 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-catalog-content\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.562352 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jls66\" (UniqueName: \"kubernetes.io/projected/4e710769-f903-4325-9eef-931e48e774d8-kube-api-access-jls66\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.562523 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-utilities\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.562568 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-catalog-content\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.563124 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-catalog-content\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.563454 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-utilities\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.585491 4716 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jls66\" (UniqueName: \"kubernetes.io/projected/4e710769-f903-4325-9eef-931e48e774d8-kube-api-access-jls66\") pod \"redhat-marketplace-4mlfx\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:37 crc kubenswrapper[4716]: I1209 16:26:37.688858 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:38 crc kubenswrapper[4716]: I1209 16:26:38.208292 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mlfx"] Dec 09 16:26:38 crc kubenswrapper[4716]: W1209 16:26:38.213899 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e710769_f903_4325_9eef_931e48e774d8.slice/crio-e0761273ed295fb48fb73aade2c11fd0b62ce918af93e11c7fa191601192f354 WatchSource:0}: Error finding container e0761273ed295fb48fb73aade2c11fd0b62ce918af93e11c7fa191601192f354: Status 404 returned error can't find the container with id e0761273ed295fb48fb73aade2c11fd0b62ce918af93e11c7fa191601192f354 Dec 09 16:26:38 crc kubenswrapper[4716]: E1209 16:26:38.216827 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:26:39 crc kubenswrapper[4716]: I1209 16:26:39.082666 4716 generic.go:334] "Generic (PLEG): container finished" podID="4e710769-f903-4325-9eef-931e48e774d8" containerID="28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899" exitCode=0 Dec 09 16:26:39 crc kubenswrapper[4716]: I1209 16:26:39.082719 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mlfx" event={"ID":"4e710769-f903-4325-9eef-931e48e774d8","Type":"ContainerDied","Data":"28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899"} Dec 09 16:26:39 crc kubenswrapper[4716]: I1209 16:26:39.082977 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mlfx" event={"ID":"4e710769-f903-4325-9eef-931e48e774d8","Type":"ContainerStarted","Data":"e0761273ed295fb48fb73aade2c11fd0b62ce918af93e11c7fa191601192f354"} Dec 09 16:26:39 crc kubenswrapper[4716]: I1209 16:26:39.214494 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:26:39 crc kubenswrapper[4716]: E1209 16:26:39.215157 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:26:41 crc kubenswrapper[4716]: I1209 16:26:41.115991 4716 generic.go:334] "Generic (PLEG): container finished" podID="4e710769-f903-4325-9eef-931e48e774d8" containerID="73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61" exitCode=0 Dec 09 16:26:41 crc kubenswrapper[4716]: I1209 16:26:41.116299 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-4mlfx" event={"ID":"4e710769-f903-4325-9eef-931e48e774d8","Type":"ContainerDied","Data":"73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61"} Dec 09 16:26:42 crc kubenswrapper[4716]: I1209 16:26:42.130265 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mlfx" event={"ID":"4e710769-f903-4325-9eef-931e48e774d8","Type":"ContainerStarted","Data":"4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd"} Dec 09 16:26:42 crc kubenswrapper[4716]: I1209 16:26:42.157151 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4mlfx" podStartSLOduration=2.532571242 podStartE2EDuration="5.157119803s" podCreationTimestamp="2025-12-09 16:26:37 +0000 UTC" firstStartedPulling="2025-12-09 16:26:39.085576849 +0000 UTC m=+4686.240320837" lastFinishedPulling="2025-12-09 16:26:41.71012541 +0000 UTC m=+4688.864869398" observedRunningTime="2025-12-09 16:26:42.150283429 +0000 UTC m=+4689.305027417" watchObservedRunningTime="2025-12-09 16:26:42.157119803 +0000 UTC m=+4689.311863791" Dec 09 16:26:46 crc kubenswrapper[4716]: E1209 16:26:46.216248 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:26:47 crc kubenswrapper[4716]: I1209 16:26:47.689068 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:47 crc kubenswrapper[4716]: I1209 16:26:47.689559 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:47 crc kubenswrapper[4716]: I1209 16:26:47.740945 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:48 crc kubenswrapper[4716]: I1209 16:26:48.252144 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:48 crc kubenswrapper[4716]: I1209 16:26:48.310426 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mlfx"] Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.204775 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4mlfx" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="registry-server" containerID="cri-o://4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd" gracePeriod=2 Dec 09 16:26:50 crc kubenswrapper[4716]: E1209 16:26:50.215464 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.726208 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.807113 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-utilities\") pod \"4e710769-f903-4325-9eef-931e48e774d8\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.807195 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-catalog-content\") pod \"4e710769-f903-4325-9eef-931e48e774d8\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.807340 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jls66\" (UniqueName: \"kubernetes.io/projected/4e710769-f903-4325-9eef-931e48e774d8-kube-api-access-jls66\") pod \"4e710769-f903-4325-9eef-931e48e774d8\" (UID: \"4e710769-f903-4325-9eef-931e48e774d8\") " Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.808096 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-utilities" (OuterVolumeSpecName: "utilities") pod "4e710769-f903-4325-9eef-931e48e774d8" (UID: "4e710769-f903-4325-9eef-931e48e774d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.813805 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e710769-f903-4325-9eef-931e48e774d8-kube-api-access-jls66" (OuterVolumeSpecName: "kube-api-access-jls66") pod "4e710769-f903-4325-9eef-931e48e774d8" (UID: "4e710769-f903-4325-9eef-931e48e774d8"). InnerVolumeSpecName "kube-api-access-jls66". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.832962 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e710769-f903-4325-9eef-931e48e774d8" (UID: "4e710769-f903-4325-9eef-931e48e774d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.910319 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.910387 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e710769-f903-4325-9eef-931e48e774d8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:26:50 crc kubenswrapper[4716]: I1209 16:26:50.910399 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jls66\" (UniqueName: \"kubernetes.io/projected/4e710769-f903-4325-9eef-931e48e774d8-kube-api-access-jls66\") on node \"crc\" DevicePath \"\"" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.221976 4716 generic.go:334] "Generic (PLEG): container finished" podID="4e710769-f903-4325-9eef-931e48e774d8" containerID="4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd" exitCode=0 Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.222074 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4mlfx" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.232583 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mlfx" event={"ID":"4e710769-f903-4325-9eef-931e48e774d8","Type":"ContainerDied","Data":"4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd"} Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.232648 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4mlfx" event={"ID":"4e710769-f903-4325-9eef-931e48e774d8","Type":"ContainerDied","Data":"e0761273ed295fb48fb73aade2c11fd0b62ce918af93e11c7fa191601192f354"} Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.232672 4716 scope.go:117] "RemoveContainer" containerID="4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.266065 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mlfx"] Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.266160 4716 scope.go:117] "RemoveContainer" containerID="73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.279054 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4mlfx"] Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.311411 4716 scope.go:117] "RemoveContainer" containerID="28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.345466 4716 scope.go:117] "RemoveContainer" containerID="4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd" Dec 09 16:26:51 crc kubenswrapper[4716]: E1209 16:26:51.346017 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd\": container with ID starting with 4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd not found: ID does not exist" containerID="4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.346068 4716 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd"} err="failed to get container status \"4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd\": rpc error: code = NotFound desc = could not find container \"4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd\": container with ID starting with 4ba34ff819a01b2c37b4f87f6dc73a1ab51108183a2d5d14d7c7c90e9ce66afd not found: ID does not exist" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.346100 4716 scope.go:117] "RemoveContainer" containerID="73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61" Dec 09 16:26:51 crc kubenswrapper[4716]: E1209 16:26:51.346350 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61\": container with ID starting with 73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61 not found: ID does not exist" containerID="73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.346369 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61"} err="failed to get container status \"73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61\": rpc error: code = NotFound desc = could not find container \"73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61\": container with ID starting with 73d3e1815117b8b8a8372c38e16e604e9b8eb89bc3a46e0fd235d01526bedb61 not found: ID does not exist" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.346383 4716 scope.go:117] "RemoveContainer" containerID="28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899" Dec 09 16:26:51 crc kubenswrapper[4716]: E1209 16:26:51.346658 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899\": container with ID starting with 28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899 not found: ID does not exist" containerID="28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899" Dec 09 16:26:51 crc kubenswrapper[4716]: I1209 16:26:51.346683 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899"} err="failed to get container status \"28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899\": rpc error: code = NotFound desc = could not find container \"28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899\": container with ID starting with 28f452457c13c828969a675b86ce34c7d56497acaf91222a71961cf711f30899 not found: ID does not exist" Dec 09 16:26:53 crc kubenswrapper[4716]: I1209 16:26:53.230061 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e710769-f903-4325-9eef-931e48e774d8" path="/var/lib/kubelet/pods/4e710769-f903-4325-9eef-931e48e774d8/volumes" Dec 09 16:26:54 crc kubenswrapper[4716]: I1209 16:26:54.214558 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:26:54 crc kubenswrapper[4716]: E1209 16:26:54.215167 4716 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:26:58 crc kubenswrapper[4716]: E1209 16:26:58.217074 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:27:01 crc kubenswrapper[4716]: E1209 16:27:01.216123 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:27:05 crc kubenswrapper[4716]: I1209 16:27:05.214007 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:27:05 crc kubenswrapper[4716]: E1209 16:27:05.214752 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:27:11 crc kubenswrapper[4716]: E1209 16:27:11.215782 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:27:15 crc kubenswrapper[4716]: E1209 16:27:15.216548 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:27:20 crc kubenswrapper[4716]: I1209 16:27:20.213526 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:27:20 crc kubenswrapper[4716]: E1209 16:27:20.214338 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:27:23 crc kubenswrapper[4716]: E1209 16:27:23.224894 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:27:27 crc kubenswrapper[4716]: E1209 16:27:27.217236 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:27:32 crc kubenswrapper[4716]: I1209 16:27:32.214608 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:27:32 crc kubenswrapper[4716]: E1209 16:27:32.215472 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:27:38 crc kubenswrapper[4716]: E1209 16:27:38.216339 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:27:42 crc kubenswrapper[4716]: E1209 16:27:42.220823 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.192924 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pd7rx"] Dec 09 16:27:44 crc kubenswrapper[4716]: E1209 16:27:44.194186 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="registry-server" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.194210 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="registry-server" Dec 09 16:27:44 crc kubenswrapper[4716]: E1209 16:27:44.194226 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="extract-utilities" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.194235 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="extract-utilities" Dec 09 16:27:44 crc kubenswrapper[4716]: E1209 16:27:44.194300 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="extract-content" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.194309 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="extract-content" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.194690 4716 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="4e710769-f903-4325-9eef-931e48e774d8" containerName="registry-server" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.196966 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.205612 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pd7rx"] Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.314142 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kgxb\" (UniqueName: \"kubernetes.io/projected/3d6f79b4-224e-4527-9476-63459d3a1da0-kube-api-access-8kgxb\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.314241 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-catalog-content\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.314672 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-utilities\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.417490 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-utilities\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.417891 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kgxb\" (UniqueName: \"kubernetes.io/projected/3d6f79b4-224e-4527-9476-63459d3a1da0-kube-api-access-8kgxb\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.417931 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-catalog-content\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.418147 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-utilities\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.418320 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-catalog-content\") pod \"community-operators-pd7rx\" (UID: 
\"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.437207 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kgxb\" (UniqueName: \"kubernetes.io/projected/3d6f79b4-224e-4527-9476-63459d3a1da0-kube-api-access-8kgxb\") pod \"community-operators-pd7rx\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:44 crc kubenswrapper[4716]: I1209 16:27:44.558279 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:45 crc kubenswrapper[4716]: I1209 16:27:45.142007 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pd7rx"] Dec 09 16:27:45 crc kubenswrapper[4716]: I1209 16:27:45.214361 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:27:45 crc kubenswrapper[4716]: E1209 16:27:45.214682 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:27:45 crc kubenswrapper[4716]: I1209 16:27:45.806057 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerID="b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862" exitCode=0 Dec 09 16:27:45 crc kubenswrapper[4716]: I1209 16:27:45.806386 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerDied","Data":"b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862"} Dec 09 16:27:45 crc kubenswrapper[4716]: I1209 16:27:45.806421 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerStarted","Data":"07c50a64c47b29b80c687e415d0d50442fc8e06382426747419d255c20ec6cec"} Dec 09 16:27:47 crc kubenswrapper[4716]: I1209 16:27:47.828137 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerStarted","Data":"ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa"} Dec 09 16:27:49 crc kubenswrapper[4716]: I1209 16:27:49.853394 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerID="ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa" exitCode=0 Dec 09 16:27:49 crc kubenswrapper[4716]: I1209 16:27:49.853882 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerDied","Data":"ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa"} Dec 09 16:27:50 crc kubenswrapper[4716]: E1209 16:27:50.214844 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:27:51 crc kubenswrapper[4716]: I1209 16:27:51.879656 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerStarted","Data":"1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717"} Dec 09 16:27:51 crc kubenswrapper[4716]: I1209 16:27:51.904512 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pd7rx" podStartSLOduration=3.329469603 podStartE2EDuration="7.904484101s" podCreationTimestamp="2025-12-09 16:27:44 +0000 UTC" firstStartedPulling="2025-12-09 16:27:45.817887238 +0000 UTC m=+4752.972631216" lastFinishedPulling="2025-12-09 16:27:50.392901726 +0000 UTC m=+4757.547645714" observedRunningTime="2025-12-09 16:27:51.899720425 +0000 UTC m=+4759.054464423" watchObservedRunningTime="2025-12-09 16:27:51.904484101 +0000 UTC m=+4759.059228089" Dec 09 16:27:54 crc kubenswrapper[4716]: I1209 16:27:54.558686 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:54 crc kubenswrapper[4716]: I1209 16:27:54.559414 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:54 crc kubenswrapper[4716]: I1209 16:27:54.617750 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:27:56 crc kubenswrapper[4716]: E1209 16:27:56.216482 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:28:00 crc kubenswrapper[4716]: I1209 16:28:00.213765 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:28:00 crc kubenswrapper[4716]: E1209 16:28:00.214606 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:28:01 crc kubenswrapper[4716]: E1209 16:28:01.215941 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:28:04 crc kubenswrapper[4716]: I1209 16:28:04.611405 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:28:04 crc kubenswrapper[4716]: I1209 16:28:04.668867 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pd7rx"] Dec 
09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.101794 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pd7rx" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="registry-server" containerID="cri-o://1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717" gracePeriod=2 Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.796189 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.899164 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kgxb\" (UniqueName: \"kubernetes.io/projected/3d6f79b4-224e-4527-9476-63459d3a1da0-kube-api-access-8kgxb\") pod \"3d6f79b4-224e-4527-9476-63459d3a1da0\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.899345 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-catalog-content\") pod \"3d6f79b4-224e-4527-9476-63459d3a1da0\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.899443 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-utilities\") pod \"3d6f79b4-224e-4527-9476-63459d3a1da0\" (UID: \"3d6f79b4-224e-4527-9476-63459d3a1da0\") " Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.900217 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-utilities" (OuterVolumeSpecName: "utilities") pod "3d6f79b4-224e-4527-9476-63459d3a1da0" (UID: "3d6f79b4-224e-4527-9476-63459d3a1da0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.905236 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d6f79b4-224e-4527-9476-63459d3a1da0-kube-api-access-8kgxb" (OuterVolumeSpecName: "kube-api-access-8kgxb") pod "3d6f79b4-224e-4527-9476-63459d3a1da0" (UID: "3d6f79b4-224e-4527-9476-63459d3a1da0"). InnerVolumeSpecName "kube-api-access-8kgxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:28:05 crc kubenswrapper[4716]: I1209 16:28:05.950208 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d6f79b4-224e-4527-9476-63459d3a1da0" (UID: "3d6f79b4-224e-4527-9476-63459d3a1da0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.002456 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kgxb\" (UniqueName: \"kubernetes.io/projected/3d6f79b4-224e-4527-9476-63459d3a1da0-kube-api-access-8kgxb\") on node \"crc\" DevicePath \"\"" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.002494 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.002504 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6f79b4-224e-4527-9476-63459d3a1da0-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.111568 4716 generic.go:334] "Generic (PLEG): container finished" podID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerID="1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717" exitCode=0 Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.111658 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerDied","Data":"1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717"} Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.111675 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pd7rx" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.111703 4716 scope.go:117] "RemoveContainer" containerID="1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.111690 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd7rx" event={"ID":"3d6f79b4-224e-4527-9476-63459d3a1da0","Type":"ContainerDied","Data":"07c50a64c47b29b80c687e415d0d50442fc8e06382426747419d255c20ec6cec"} Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.132155 4716 scope.go:117] "RemoveContainer" containerID="ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.150045 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pd7rx"] Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.160208 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pd7rx"] Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.174713 4716 scope.go:117] "RemoveContainer" containerID="b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.208131 4716 scope.go:117] "RemoveContainer" containerID="1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717" Dec 09 16:28:06 crc kubenswrapper[4716]: E1209 16:28:06.208643 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717\": container with ID starting with 1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717 not found: ID does not exist" containerID="1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717" Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.208694 
4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717"} err="failed to get container status \"1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717\": rpc error: code = NotFound desc = could not find container \"1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717\": container with ID starting with 1d2a72d89be0157f20bd767c5b6dd2225e014bdd9fd465f33025fd203684a717 not found: ID does not exist"
Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.208745 4716 scope.go:117] "RemoveContainer" containerID="ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa"
Dec 09 16:28:06 crc kubenswrapper[4716]: E1209 16:28:06.209220 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa\": container with ID starting with ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa not found: ID does not exist" containerID="ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa"
Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.209256 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa"} err="failed to get container status \"ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa\": rpc error: code = NotFound desc = could not find container \"ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa\": container with ID starting with ed4c62c4206e73757786a930db4c41e89574ce37376c287df72a927c33235aaa not found: ID does not exist"
Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.209284 4716 scope.go:117] "RemoveContainer" containerID="b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862"
Dec 09 16:28:06 crc kubenswrapper[4716]: E1209 16:28:06.209555 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862\": container with ID starting with b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862 not found: ID does not exist" containerID="b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862"
Dec 09 16:28:06 crc kubenswrapper[4716]: I1209 16:28:06.209589 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862"} err="failed to get container status \"b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862\": rpc error: code = NotFound desc = could not find container \"b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862\": container with ID starting with b28834c87d4774eb4314159ff413a115697e59951468713c46f2017725b7a862 not found: ID does not exist"
Dec 09 16:28:07 crc kubenswrapper[4716]: E1209 16:28:07.216772 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:28:07 crc kubenswrapper[4716]: I1209 16:28:07.227932 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" path="/var/lib/kubelet/pods/3d6f79b4-224e-4527-9476-63459d3a1da0/volumes"
Dec 09 16:28:13 crc kubenswrapper[4716]: I1209 16:28:13.223313 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:28:13 crc kubenswrapper[4716]: E1209 16:28:13.224546 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:28:14 crc kubenswrapper[4716]: E1209 16:28:14.216367 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:28:21 crc kubenswrapper[4716]: E1209 16:28:21.221778 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:28:26 crc kubenswrapper[4716]: I1209 16:28:26.215547 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:28:26 crc kubenswrapper[4716]: E1209 16:28:26.216722 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:28:27 crc kubenswrapper[4716]: E1209 16:28:27.217473 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:28:32 crc kubenswrapper[4716]: E1209 16:28:32.217687 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:28:37 crc kubenswrapper[4716]: I1209 16:28:37.213804 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:28:37 crc kubenswrapper[4716]: E1209 16:28:37.215557 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:28:38 crc kubenswrapper[4716]: E1209 16:28:38.216596 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:28:46 crc kubenswrapper[4716]: E1209 16:28:46.216598 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:28:49 crc kubenswrapper[4716]: I1209 16:28:49.214843 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:28:49 crc kubenswrapper[4716]: E1209 16:28:49.215705 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:28:51 crc kubenswrapper[4716]: E1209 16:28:51.218739 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:28:59 crc kubenswrapper[4716]: E1209 16:28:59.216405 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:29:03 crc kubenswrapper[4716]: I1209 16:29:03.225456 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:29:03 crc kubenswrapper[4716]: E1209 16:29:03.226568 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:29:05 crc kubenswrapper[4716]: I1209 16:29:05.217390 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 16:29:05 crc kubenswrapper[4716]: E1209 16:29:05.338243 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:29:05 crc kubenswrapper[4716]: E1209 16:29:05.338337 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:29:05 crc kubenswrapper[4716]: E1209 16:29:05.338493 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:29:05 crc kubenswrapper[4716]: E1209 16:29:05.339720 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:29:14 crc kubenswrapper[4716]: E1209 16:29:14.343948 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:29:14 crc kubenswrapper[4716]: E1209 16:29:14.344376 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:29:14 crc kubenswrapper[4716]: E1209 16:29:14.344508 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:29:14 crc kubenswrapper[4716]: E1209 16:29:14.346590 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:29:18 crc kubenswrapper[4716]: I1209 16:29:18.214126 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:29:18 crc kubenswrapper[4716]: E1209 16:29:18.215141 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:29:20 crc kubenswrapper[4716]: E1209 16:29:20.215719 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:29:27 crc kubenswrapper[4716]: E1209 16:29:27.217853 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:29:32 crc kubenswrapper[4716]: E1209 16:29:32.217055 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:29:33 crc kubenswrapper[4716]: I1209 16:29:33.226010 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:29:33 crc kubenswrapper[4716]: E1209 16:29:33.226351 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:29:38 crc kubenswrapper[4716]: E1209 16:29:38.216471 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:29:43 crc kubenswrapper[4716]: E1209 16:29:43.410584 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:29:48 crc kubenswrapper[4716]: I1209 16:29:48.327635 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875"
Dec 09 16:29:53 crc kubenswrapper[4716]: E1209 16:29:53.225438 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:29:56 crc kubenswrapper[4716]: E1209 16:29:56.215457 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.160111 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"]
Dec 09 16:30:00 crc kubenswrapper[4716]: E1209 16:30:00.161379 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="extract-utilities"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.161405 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="extract-utilities"
Dec 09 16:30:00 crc kubenswrapper[4716]: E1209 16:30:00.161460 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="registry-server"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.161470 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="registry-server"
Dec 09 16:30:00 crc kubenswrapper[4716]: E1209 16:30:00.161486 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="extract-content"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.161492 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="extract-content"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.161821 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d6f79b4-224e-4527-9476-63459d3a1da0" containerName="registry-server"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.163096 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.165441 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7k9s\" (UniqueName: \"kubernetes.io/projected/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-kube-api-access-m7k9s\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.165935 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-secret-volume\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.166041 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-config-volume\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.166195 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.169582 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.171956 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"]
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.269169 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7k9s\" (UniqueName: \"kubernetes.io/projected/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-kube-api-access-m7k9s\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.269261 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-secret-volume\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.269319 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-config-volume\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.270638 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-config-volume\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.276229 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-secret-volume\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.286570 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7k9s\" (UniqueName: \"kubernetes.io/projected/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-kube-api-access-m7k9s\") pod \"collect-profiles-29421630-2qrcl\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.496031 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:00 crc kubenswrapper[4716]: I1209 16:30:00.630601 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"7c3ee6b5713fd27686aeb15dc72c9af34b2b0fc615a90f78e1715c3ca64bfecc"}
Dec 09 16:30:01 crc kubenswrapper[4716]: I1209 16:30:01.495398 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"]
Dec 09 16:30:01 crc kubenswrapper[4716]: W1209 16:30:01.501109 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9d43c60_67f8_42e2_a521_4bdfecfcaca4.slice/crio-f7243fb36d28b6e26ef62f0b1fed8b595f0ccac29e462d2d34a744cf8c476cac WatchSource:0}: Error finding container f7243fb36d28b6e26ef62f0b1fed8b595f0ccac29e462d2d34a744cf8c476cac: Status 404 returned error can't find the container with id f7243fb36d28b6e26ef62f0b1fed8b595f0ccac29e462d2d34a744cf8c476cac
Dec 09 16:30:01 crc kubenswrapper[4716]: I1209 16:30:01.656368 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl" event={"ID":"e9d43c60-67f8-42e2-a521-4bdfecfcaca4","Type":"ContainerStarted","Data":"f7243fb36d28b6e26ef62f0b1fed8b595f0ccac29e462d2d34a744cf8c476cac"}
Dec 09 16:30:02 crc kubenswrapper[4716]: I1209 16:30:02.667935 4716 generic.go:334] "Generic (PLEG): container finished" podID="e9d43c60-67f8-42e2-a521-4bdfecfcaca4" containerID="e2d95781da3baa9829b3e8521c85ced20b19063c7708084117ab6e41124cd448" exitCode=0
Dec 09 16:30:02 crc kubenswrapper[4716]: I1209 16:30:02.668039 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl" event={"ID":"e9d43c60-67f8-42e2-a521-4bdfecfcaca4","Type":"ContainerDied","Data":"e2d95781da3baa9829b3e8521c85ced20b19063c7708084117ab6e41124cd448"}
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.733834 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl" event={"ID":"e9d43c60-67f8-42e2-a521-4bdfecfcaca4","Type":"ContainerDied","Data":"f7243fb36d28b6e26ef62f0b1fed8b595f0ccac29e462d2d34a744cf8c476cac"}
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.734384 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7243fb36d28b6e26ef62f0b1fed8b595f0ccac29e462d2d34a744cf8c476cac"
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.759185 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.910296 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7k9s\" (UniqueName: \"kubernetes.io/projected/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-kube-api-access-m7k9s\") pod \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") "
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.910374 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-config-volume\") pod \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") "
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.910583 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-secret-volume\") pod \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\" (UID: \"e9d43c60-67f8-42e2-a521-4bdfecfcaca4\") "
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.912113 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-config-volume" (OuterVolumeSpecName: "config-volume") pod "e9d43c60-67f8-42e2-a521-4bdfecfcaca4" (UID: "e9d43c60-67f8-42e2-a521-4bdfecfcaca4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.916199 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e9d43c60-67f8-42e2-a521-4bdfecfcaca4" (UID: "e9d43c60-67f8-42e2-a521-4bdfecfcaca4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 16:30:04 crc kubenswrapper[4716]: I1209 16:30:04.916410 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-kube-api-access-m7k9s" (OuterVolumeSpecName: "kube-api-access-m7k9s") pod "e9d43c60-67f8-42e2-a521-4bdfecfcaca4" (UID: "e9d43c60-67f8-42e2-a521-4bdfecfcaca4"). InnerVolumeSpecName "kube-api-access-m7k9s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:30:05 crc kubenswrapper[4716]: I1209 16:30:05.014013 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-secret-volume\") on node \"crc\" DevicePath \"\""
Dec 09 16:30:05 crc kubenswrapper[4716]: I1209 16:30:05.014307 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7k9s\" (UniqueName: \"kubernetes.io/projected/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-kube-api-access-m7k9s\") on node \"crc\" DevicePath \"\""
Dec 09 16:30:05 crc kubenswrapper[4716]: I1209 16:30:05.014402 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e9d43c60-67f8-42e2-a521-4bdfecfcaca4-config-volume\") on node \"crc\" DevicePath \"\""
Dec 09 16:30:05 crc kubenswrapper[4716]: E1209 16:30:05.216505 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:30:05 crc kubenswrapper[4716]: I1209 16:30:05.743410 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421630-2qrcl"
Dec 09 16:30:05 crc kubenswrapper[4716]: I1209 16:30:05.839517 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr"]
Dec 09 16:30:05 crc kubenswrapper[4716]: I1209 16:30:05.849645 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421585-sgsfr"]
Dec 09 16:30:07 crc kubenswrapper[4716]: I1209 16:30:07.228407 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d577797-357f-4847-9841-6469587f7285" path="/var/lib/kubelet/pods/8d577797-357f-4847-9841-6469587f7285/volumes"
Dec 09 16:30:08 crc kubenswrapper[4716]: E1209 16:30:08.216278 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:30:15 crc kubenswrapper[4716]: I1209 16:30:15.872935 4716 scope.go:117] "RemoveContainer" containerID="d3d92986e7f39730ec0a4896e2a7b3d24cd1cb1d95a3cc0fd85e62023c6505a9"
Dec 09 16:30:16 crc kubenswrapper[4716]: E1209 16:30:16.215534 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:30:19 crc kubenswrapper[4716]: E1209 16:30:19.216552 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:30:27 crc kubenswrapper[4716]: E1209 16:30:27.216834 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:30:34 crc kubenswrapper[4716]: E1209 16:30:34.216829 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:30:38 crc kubenswrapper[4716]: E1209 16:30:38.217065 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:30:48 crc kubenswrapper[4716]: E1209 16:30:48.216944 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:30:52 crc kubenswrapper[4716]: E1209 16:30:52.218006 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:30:59 crc kubenswrapper[4716]: E1209 16:30:59.216179 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:31:07 crc kubenswrapper[4716]: E1209 16:31:07.216992 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:31:10 crc kubenswrapper[4716]: E1209 16:31:10.216403 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:31:14 crc kubenswrapper[4716]: I1209 16:31:14.995943 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"]
Dec 09 16:31:14 crc kubenswrapper[4716]: E1209 16:31:14.997221 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9d43c60-67f8-42e2-a521-4bdfecfcaca4" containerName="collect-profiles"
Dec 09 16:31:14 crc kubenswrapper[4716]: I1209 16:31:14.997242 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9d43c60-67f8-42e2-a521-4bdfecfcaca4" containerName="collect-profiles"
Dec 09 16:31:14 crc kubenswrapper[4716]: I1209 16:31:14.997638 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9d43c60-67f8-42e2-a521-4bdfecfcaca4" containerName="collect-profiles"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.005109 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.007900 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"]
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.085559 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-catalog-content\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.085970 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxr5v\" (UniqueName: \"kubernetes.io/projected/252727f3-87c7-420e-9bc2-5a7399f2e0e7-kube-api-access-vxr5v\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.086076 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-utilities\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.188473 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxr5v\" (UniqueName: \"kubernetes.io/projected/252727f3-87c7-420e-9bc2-5a7399f2e0e7-kube-api-access-vxr5v\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.188606 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-utilities\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.188835 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-catalog-content\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.189107 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-utilities\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.189313 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-catalog-content\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.215592 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxr5v\" (UniqueName: \"kubernetes.io/projected/252727f3-87c7-420e-9bc2-5a7399f2e0e7-kube-api-access-vxr5v\") pod \"redhat-operators-r7ztf\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.339571 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:15 crc kubenswrapper[4716]: I1209 16:31:15.829310 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"]
Dec 09 16:31:16 crc kubenswrapper[4716]: I1209 16:31:16.116950 4716 generic.go:334] "Generic (PLEG): container finished" podID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerID="481940dfef0f52c0f906f3c9b73894202bb186abcebfb5834fa7abee4a1ad9b2" exitCode=0
Dec 09 16:31:16 crc kubenswrapper[4716]: I1209 16:31:16.117001 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerDied","Data":"481940dfef0f52c0f906f3c9b73894202bb186abcebfb5834fa7abee4a1ad9b2"}
Dec 09 16:31:16 crc kubenswrapper[4716]: I1209 16:31:16.117229 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerStarted","Data":"c7272b4ee7d5df1c0718e9f244173de725dc66cfa37acbdca98b662492fa0c0c"}
Dec 09 16:31:20 crc kubenswrapper[4716]: E1209 16:31:20.217310 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:31:22 crc kubenswrapper[4716]: E1209 16:31:22.218968 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:31:26 crc kubenswrapper[4716]: I1209 16:31:26.256321 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerStarted","Data":"306c6891d2d6f0c56b26642b63a86fc50e4c0a1ef4140d21e9e594c06af5524e"}
Dec 09 16:31:28 crc kubenswrapper[4716]: I1209 16:31:28.279156 4716 generic.go:334] "Generic (PLEG): container finished" podID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerID="306c6891d2d6f0c56b26642b63a86fc50e4c0a1ef4140d21e9e594c06af5524e" exitCode=0
Dec 09 16:31:28 crc kubenswrapper[4716]: I1209 16:31:28.279235 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerDied","Data":"306c6891d2d6f0c56b26642b63a86fc50e4c0a1ef4140d21e9e594c06af5524e"}
Dec 09 16:31:29 crc kubenswrapper[4716]: I1209 16:31:29.292750 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerStarted","Data":"b6fc51f749dd339ef4ab830d4cc605e4b6faade1e3e5d49575eeb44b2d54b380"}
Dec 09 16:31:29 crc kubenswrapper[4716]: I1209 16:31:29.320382 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r7ztf" podStartSLOduration=2.65550216 podStartE2EDuration="15.320343418s" podCreationTimestamp="2025-12-09 16:31:14 +0000 UTC" firstStartedPulling="2025-12-09 16:31:16.11900825 +0000 UTC m=+4963.273752238" lastFinishedPulling="2025-12-09 16:31:28.783849508 +0000 UTC m=+4975.938593496" observedRunningTime="2025-12-09 16:31:29.314809121 +0000 UTC m=+4976.469553109" watchObservedRunningTime="2025-12-09 16:31:29.320343418 +0000 UTC m=+4976.475087406"
Dec 09 16:31:34 crc kubenswrapper[4716]: E1209 16:31:34.217186 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:31:35 crc kubenswrapper[4716]: I1209 16:31:35.340790 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:35 crc kubenswrapper[4716]: I1209 16:31:35.341167 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:35 crc kubenswrapper[4716]: I1209 16:31:35.392781 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:36 crc kubenswrapper[4716]: E1209 16:31:36.216455 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:31:36 crc kubenswrapper[4716]: I1209 16:31:36.414771 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r7ztf"
Dec 09 16:31:36 crc kubenswrapper[4716]: I1209 16:31:36.507097 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"]
Dec 09 16:31:36 crc kubenswrapper[4716]: I1209 16:31:36.553936 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"]
Dec 09 16:31:36 crc kubenswrapper[4716]: I1209 16:31:36.554282 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dtdsg" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="registry-server" containerID="cri-o://88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75" gracePeriod=2
Dec 09 16:31:37 crc kubenswrapper[4716]: E1209 16:31:37.091536 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e63faed_8d6e_4b22_97be_20cc79795147.slice/crio-88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e63faed_8d6e_4b22_97be_20cc79795147.slice/crio-conmon-88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.201054 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dtdsg"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.305654 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-catalog-content\") pod \"6e63faed-8d6e-4b22-97be-20cc79795147\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") "
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.305717 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-utilities\") pod \"6e63faed-8d6e-4b22-97be-20cc79795147\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") "
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.305892 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bj79z\" (UniqueName: \"kubernetes.io/projected/6e63faed-8d6e-4b22-97be-20cc79795147-kube-api-access-bj79z\") pod \"6e63faed-8d6e-4b22-97be-20cc79795147\" (UID: \"6e63faed-8d6e-4b22-97be-20cc79795147\") "
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.308523 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-utilities" (OuterVolumeSpecName: "utilities") pod "6e63faed-8d6e-4b22-97be-20cc79795147" (UID: "6e63faed-8d6e-4b22-97be-20cc79795147"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.313770 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e63faed-8d6e-4b22-97be-20cc79795147-kube-api-access-bj79z" (OuterVolumeSpecName: "kube-api-access-bj79z") pod "6e63faed-8d6e-4b22-97be-20cc79795147" (UID: "6e63faed-8d6e-4b22-97be-20cc79795147"). InnerVolumeSpecName "kube-api-access-bj79z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.378344 4716 generic.go:334] "Generic (PLEG): container finished" podID="6e63faed-8d6e-4b22-97be-20cc79795147" containerID="88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75" exitCode=0
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.379439 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dtdsg"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.379987 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerDied","Data":"88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75"}
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.380018 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtdsg" event={"ID":"6e63faed-8d6e-4b22-97be-20cc79795147","Type":"ContainerDied","Data":"4768241883b59e63b7491ecf14dd21b161a2b9f874ffe4c3803595ebabe9aedb"}
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.380059 4716 scope.go:117] "RemoveContainer" containerID="88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.404998 4716 scope.go:117] "RemoveContainer" containerID="4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.408932 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.408951 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bj79z\" (UniqueName: \"kubernetes.io/projected/6e63faed-8d6e-4b22-97be-20cc79795147-kube-api-access-bj79z\") on node \"crc\" DevicePath \"\""
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.432963 4716 scope.go:117] "RemoveContainer" containerID="feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.438677 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e63faed-8d6e-4b22-97be-20cc79795147" (UID: "6e63faed-8d6e-4b22-97be-20cc79795147"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.485634 4716 scope.go:117] "RemoveContainer" containerID="88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75"
Dec 09 16:31:37 crc kubenswrapper[4716]: E1209 16:31:37.486204 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75\": container with ID starting with 88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75 not found: ID does not exist" containerID="88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.486241 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75"} err="failed to get container status \"88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75\": rpc error: code = NotFound desc = could not find container \"88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75\": container with ID starting with 88d2b581184ab3a2ee28df76595e8b501be42f774c2005da731137317fbccc75 not found: ID does not exist"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.486264 4716 scope.go:117] "RemoveContainer" containerID="4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0"
Dec 09 16:31:37 crc kubenswrapper[4716]: E1209 16:31:37.487232 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0\": container with ID starting with 4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0 not found: ID does not exist" containerID="4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.487279 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0"} err="failed to get container status \"4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0\": rpc error: code = NotFound desc = could not find container \"4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0\": container with ID starting with 4b3e1bc3719e2b3fd252ab08471c03ad4aa563e02cddc7fff8089f78f09fe1c0 not found: ID does not exist"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.487311 4716 scope.go:117] "RemoveContainer" containerID="feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c"
Dec 09 16:31:37 crc kubenswrapper[4716]: E1209 16:31:37.487935 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c\": container with ID starting with feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c not found: ID does not exist" containerID="feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.488022 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c"} err="failed to get container status \"feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c\": rpc error: code = NotFound desc = could not find container \"feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c\": container with ID starting with feb860df9e8477054f266780ffe476cc12f644ac6038e17f61c3a7b7b950812c not found: ID does not exist"
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.511209 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e63faed-8d6e-4b22-97be-20cc79795147-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.765198 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"]
Dec 09 16:31:37 crc kubenswrapper[4716]: I1209 16:31:37.777685 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dtdsg"]
Dec 09 16:31:39 crc kubenswrapper[4716]: I1209 16:31:39.227579 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" path="/var/lib/kubelet/pods/6e63faed-8d6e-4b22-97be-20cc79795147/volumes"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.036328 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"]
Dec 09 16:31:40 crc kubenswrapper[4716]: E1209 16:31:40.039200 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="extract-content"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.039236 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="extract-content"
Dec 09 16:31:40 crc kubenswrapper[4716]: E1209 16:31:40.039277 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="extract-utilities"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.039286 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="extract-utilities"
Dec 09 16:31:40 crc kubenswrapper[4716]: E1209 16:31:40.039308 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="registry-server"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.039315 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="registry-server"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.039611 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e63faed-8d6e-4b22-97be-20cc79795147" containerName="registry-server"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.040597 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.043274 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.047127 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.047192 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-vjwcm"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.053665 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"]
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.056071 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.083155 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5jfg\" (UniqueName: \"kubernetes.io/projected/0fd09ac5-846e-4895-a05f-0c39613f2ea8-kube-api-access-h5jfg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.083278 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.083314 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.186463 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.186764 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5jfg\" (UniqueName: \"kubernetes.io/projected/0fd09ac5-846e-4895-a05f-0c39613f2ea8-kube-api-access-h5jfg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.186888 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.193439 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.193858 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.209058 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5jfg\" (UniqueName: \"kubernetes.io/projected/0fd09ac5-846e-4895-a05f-0c39613f2ea8-kube-api-access-h5jfg\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-cdztz\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:40 crc kubenswrapper[4716]: I1209 16:31:40.362369 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.049992 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"]
Dec 09 16:31:41 crc kubenswrapper[4716]: W1209 16:31:41.055034 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fd09ac5_846e_4895_a05f_0c39613f2ea8.slice/crio-718ca07ab16d646ca15df7b0daab0814d42be070d2870eb5627b1b38485dd8f8 WatchSource:0}: Error finding container 718ca07ab16d646ca15df7b0daab0814d42be070d2870eb5627b1b38485dd8f8: Status 404 returned error can't find the container with id 718ca07ab16d646ca15df7b0daab0814d42be070d2870eb5627b1b38485dd8f8
Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.442273 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz" event={"ID":"0fd09ac5-846e-4895-a05f-0c39613f2ea8","Type":"ContainerStarted","Data":"718ca07ab16d646ca15df7b0daab0814d42be070d2870eb5627b1b38485dd8f8"}
Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.869512 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pkcn7"]
Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.872327 4716 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.882481 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pkcn7"] Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.949137 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-catalog-content\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.949468 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxk9v\" (UniqueName: \"kubernetes.io/projected/44211914-77b6-4ea5-8b31-137e51144600-kube-api-access-xxk9v\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:41 crc kubenswrapper[4716]: I1209 16:31:41.949794 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-utilities\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.051994 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxk9v\" (UniqueName: \"kubernetes.io/projected/44211914-77b6-4ea5-8b31-137e51144600-kube-api-access-xxk9v\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.052180 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-utilities\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.052329 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-catalog-content\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.052766 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-catalog-content\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.053172 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-utilities\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.070482 4716 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xxk9v\" (UniqueName: \"kubernetes.io/projected/44211914-77b6-4ea5-8b31-137e51144600-kube-api-access-xxk9v\") pod \"certified-operators-pkcn7\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.339607 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.481260 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz" event={"ID":"0fd09ac5-846e-4895-a05f-0c39613f2ea8","Type":"ContainerStarted","Data":"0ea1807999f8dbc5821e3a4759b6b806a56da2f2eee1029846a019710e95518f"} Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.508163 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz" podStartSLOduration=1.697457715 podStartE2EDuration="2.508140924s" podCreationTimestamp="2025-12-09 16:31:40 +0000 UTC" firstStartedPulling="2025-12-09 16:31:41.057727949 +0000 UTC m=+4988.212471937" lastFinishedPulling="2025-12-09 16:31:41.868411148 +0000 UTC m=+4989.023155146" observedRunningTime="2025-12-09 16:31:42.50309759 +0000 UTC m=+4989.657841578" watchObservedRunningTime="2025-12-09 16:31:42.508140924 +0000 UTC m=+4989.662884912" Dec 09 16:31:42 crc kubenswrapper[4716]: I1209 16:31:42.903828 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pkcn7"] Dec 09 16:31:43 crc kubenswrapper[4716]: W1209 16:31:43.305784 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44211914_77b6_4ea5_8b31_137e51144600.slice/crio-cad6ee5b9088f9b0a89ca8445503b5616cbe35f685e8fec87d8c7f09f66c4542 WatchSource:0}: Error finding container cad6ee5b9088f9b0a89ca8445503b5616cbe35f685e8fec87d8c7f09f66c4542: Status 404 returned error can't find the container with id cad6ee5b9088f9b0a89ca8445503b5616cbe35f685e8fec87d8c7f09f66c4542 Dec 09 16:31:43 crc kubenswrapper[4716]: I1209 16:31:43.493018 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerStarted","Data":"cad6ee5b9088f9b0a89ca8445503b5616cbe35f685e8fec87d8c7f09f66c4542"} Dec 09 16:31:44 crc kubenswrapper[4716]: I1209 16:31:44.505151 4716 generic.go:334] "Generic (PLEG): container finished" podID="44211914-77b6-4ea5-8b31-137e51144600" containerID="3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5" exitCode=0 Dec 09 16:31:44 crc kubenswrapper[4716]: I1209 16:31:44.505336 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerDied","Data":"3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5"} Dec 09 16:31:45 crc kubenswrapper[4716]: E1209 16:31:45.215871 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:31:47 crc 
kubenswrapper[4716]: I1209 16:31:47.537462 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerStarted","Data":"f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d"} Dec 09 16:31:48 crc kubenswrapper[4716]: E1209 16:31:48.218375 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:31:48 crc kubenswrapper[4716]: I1209 16:31:48.549209 4716 generic.go:334] "Generic (PLEG): container finished" podID="44211914-77b6-4ea5-8b31-137e51144600" containerID="f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d" exitCode=0 Dec 09 16:31:48 crc kubenswrapper[4716]: I1209 16:31:48.549272 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerDied","Data":"f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d"} Dec 09 16:31:51 crc kubenswrapper[4716]: I1209 16:31:51.580267 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerStarted","Data":"fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5"} Dec 09 16:31:51 crc kubenswrapper[4716]: I1209 16:31:51.605737 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pkcn7" podStartSLOduration=4.537416287 podStartE2EDuration="10.605711099s" podCreationTimestamp="2025-12-09 16:31:41 +0000 UTC" firstStartedPulling="2025-12-09 16:31:44.508450889 +0000 UTC m=+4991.663194877" lastFinishedPulling="2025-12-09 16:31:50.576745701 +0000 UTC m=+4997.731489689" observedRunningTime="2025-12-09 16:31:51.600299775 +0000 UTC m=+4998.755043783" watchObservedRunningTime="2025-12-09 16:31:51.605711099 +0000 UTC m=+4998.760455087" Dec 09 16:31:52 crc kubenswrapper[4716]: I1209 16:31:52.340778 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:52 crc kubenswrapper[4716]: I1209 16:31:52.341236 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:52 crc kubenswrapper[4716]: I1209 16:31:52.388786 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:31:58 crc kubenswrapper[4716]: E1209 16:31:58.218491 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:31:59 crc kubenswrapper[4716]: E1209 16:31:59.215293 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:32:02 crc kubenswrapper[4716]: I1209 16:32:02.407904 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:32:02 crc kubenswrapper[4716]: I1209 16:32:02.459493 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pkcn7"] Dec 09 16:32:02 crc kubenswrapper[4716]: I1209 16:32:02.701402 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pkcn7" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="registry-server" containerID="cri-o://fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5" gracePeriod=2 Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.224004 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.290949 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxk9v\" (UniqueName: \"kubernetes.io/projected/44211914-77b6-4ea5-8b31-137e51144600-kube-api-access-xxk9v\") pod \"44211914-77b6-4ea5-8b31-137e51144600\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.291394 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-utilities\") pod \"44211914-77b6-4ea5-8b31-137e51144600\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.291515 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-catalog-content\") pod \"44211914-77b6-4ea5-8b31-137e51144600\" (UID: \"44211914-77b6-4ea5-8b31-137e51144600\") " Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.292813 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-utilities" (OuterVolumeSpecName: "utilities") pod "44211914-77b6-4ea5-8b31-137e51144600" (UID: "44211914-77b6-4ea5-8b31-137e51144600"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.296882 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44211914-77b6-4ea5-8b31-137e51144600-kube-api-access-xxk9v" (OuterVolumeSpecName: "kube-api-access-xxk9v") pod "44211914-77b6-4ea5-8b31-137e51144600" (UID: "44211914-77b6-4ea5-8b31-137e51144600"). InnerVolumeSpecName "kube-api-access-xxk9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.354726 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44211914-77b6-4ea5-8b31-137e51144600" (UID: "44211914-77b6-4ea5-8b31-137e51144600"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.394675 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.394712 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44211914-77b6-4ea5-8b31-137e51144600-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.394726 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxk9v\" (UniqueName: \"kubernetes.io/projected/44211914-77b6-4ea5-8b31-137e51144600-kube-api-access-xxk9v\") on node \"crc\" DevicePath \"\"" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.718547 4716 generic.go:334] "Generic (PLEG): container finished" podID="44211914-77b6-4ea5-8b31-137e51144600" containerID="fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5" exitCode=0 Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.718601 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerDied","Data":"fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5"} Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.718653 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pkcn7" event={"ID":"44211914-77b6-4ea5-8b31-137e51144600","Type":"ContainerDied","Data":"cad6ee5b9088f9b0a89ca8445503b5616cbe35f685e8fec87d8c7f09f66c4542"} Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.718675 4716 scope.go:117] "RemoveContainer" containerID="fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.718845 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pkcn7" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.749172 4716 scope.go:117] "RemoveContainer" containerID="f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d" Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.770063 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pkcn7"] Dec 09 16:32:03 crc kubenswrapper[4716]: I1209 16:32:03.786476 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pkcn7"] Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.315846 4716 scope.go:117] "RemoveContainer" containerID="3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5" Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.469436 4716 scope.go:117] "RemoveContainer" containerID="fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5" Dec 09 16:32:04 crc kubenswrapper[4716]: E1209 16:32:04.469928 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5\": container with ID starting with fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5 not found: ID does not exist" containerID="fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5" Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.469963 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5"} err="failed to get container status \"fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5\": rpc error: code = NotFound desc = could not find container \"fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5\": container with ID starting with fa9b969fac8e692c24157ca329a0623d0a1131f2fe513aaf65be0a2210c519d5 not found: ID does not exist" Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.469995 4716 scope.go:117] "RemoveContainer" containerID="f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d" Dec 09 16:32:04 crc kubenswrapper[4716]: E1209 16:32:04.470418 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d\": container with ID starting with f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d not found: ID does not exist" containerID="f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d" Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.470449 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d"} err="failed to get container status \"f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d\": rpc error: code = NotFound desc = could not find container \"f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d\": container with ID starting with f2ea26fbef73a18cb0d3cdf2d59853ca8103014b5bc90871a4ffb831c832cd7d not found: ID does not exist" Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.470466 4716 scope.go:117] "RemoveContainer" containerID="3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5" Dec 09 16:32:04 crc kubenswrapper[4716]: E1209 16:32:04.470824 4716 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5\": container with ID starting with 3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5 not found: ID does not exist" containerID="3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5" Dec 09 16:32:04 crc kubenswrapper[4716]: I1209 16:32:04.470849 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5"} err="failed to get container status \"3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5\": rpc error: code = NotFound desc = could not find container \"3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5\": container with ID starting with 3129f9f8efd1782ab2c2931d112893d06cb91d176da5c1db9af592475c6bfcd5 not found: ID does not exist" Dec 09 16:32:05 crc kubenswrapper[4716]: I1209 16:32:05.226606 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44211914-77b6-4ea5-8b31-137e51144600" path="/var/lib/kubelet/pods/44211914-77b6-4ea5-8b31-137e51144600/volumes" Dec 09 16:32:10 crc kubenswrapper[4716]: E1209 16:32:10.215960 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:32:10 crc kubenswrapper[4716]: E1209 16:32:10.215960 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:32:17 crc kubenswrapper[4716]: I1209 16:32:17.922776 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:32:17 crc kubenswrapper[4716]: I1209 16:32:17.923354 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:32:24 crc kubenswrapper[4716]: E1209 16:32:24.216993 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:32:25 crc kubenswrapper[4716]: E1209 16:32:25.216804 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 
16:32:38 crc kubenswrapper[4716]: E1209 16:32:38.215903 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:32:40 crc kubenswrapper[4716]: E1209 16:32:40.216344 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:32:47 crc kubenswrapper[4716]: I1209 16:32:47.922103 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:32:47 crc kubenswrapper[4716]: I1209 16:32:47.922782 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:32:49 crc kubenswrapper[4716]: E1209 16:32:49.216311 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:32:52 crc kubenswrapper[4716]: E1209 16:32:52.217224 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:33:01 crc kubenswrapper[4716]: E1209 16:33:01.218272 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:33:07 crc kubenswrapper[4716]: E1209 16:33:07.216032 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:33:14 crc kubenswrapper[4716]: E1209 16:33:14.218281 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 
09 16:33:17 crc kubenswrapper[4716]: I1209 16:33:17.922354 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:33:17 crc kubenswrapper[4716]: I1209 16:33:17.922977 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:33:17 crc kubenswrapper[4716]: I1209 16:33:17.923033 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:33:17 crc kubenswrapper[4716]: I1209 16:33:17.924017 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7c3ee6b5713fd27686aeb15dc72c9af34b2b0fc615a90f78e1715c3ca64bfecc"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:33:17 crc kubenswrapper[4716]: I1209 16:33:17.924099 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://7c3ee6b5713fd27686aeb15dc72c9af34b2b0fc615a90f78e1715c3ca64bfecc" gracePeriod=600 Dec 09 16:33:18 crc kubenswrapper[4716]: I1209 16:33:18.610915 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="7c3ee6b5713fd27686aeb15dc72c9af34b2b0fc615a90f78e1715c3ca64bfecc" exitCode=0 Dec 09 16:33:18 crc kubenswrapper[4716]: I1209 16:33:18.611001 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"7c3ee6b5713fd27686aeb15dc72c9af34b2b0fc615a90f78e1715c3ca64bfecc"} Dec 09 16:33:18 crc kubenswrapper[4716]: I1209 16:33:18.611520 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"} Dec 09 16:33:18 crc kubenswrapper[4716]: I1209 16:33:18.611549 4716 scope.go:117] "RemoveContainer" containerID="eefffdae028576e327313dd73908e2cff9e01c492e5c8eb5f6860a6bc5b7d875" Dec 09 16:33:19 crc kubenswrapper[4716]: E1209 16:33:19.216124 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:33:29 crc kubenswrapper[4716]: E1209 16:33:29.216949 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:33:33 crc kubenswrapper[4716]: E1209 16:33:33.239961 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:33:44 crc kubenswrapper[4716]: E1209 16:33:44.216212 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:33:48 crc kubenswrapper[4716]: E1209 16:33:48.217432 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:33:56 crc kubenswrapper[4716]: E1209 16:33:56.216791 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:34:02 crc kubenswrapper[4716]: E1209 16:34:02.446454 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:34:10 crc kubenswrapper[4716]: I1209 16:34:10.217100 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 16:34:10 crc kubenswrapper[4716]: E1209 16:34:10.350459 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:34:10 crc kubenswrapper[4716]: E1209 16:34:10.350550 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:34:10 crc kubenswrapper[4716]: E1209 16:34:10.350718 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:34:10 crc kubenswrapper[4716]: E1209 16:34:10.351879 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:34:14 crc kubenswrapper[4716]: E1209 16:34:14.228218 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:34:25 crc kubenswrapper[4716]: E1209 16:34:25.216302 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:34:25 crc kubenswrapper[4716]: E1209 16:34:25.363787 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:34:25 crc kubenswrapper[4716]: E1209 16:34:25.363894 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:34:25 crc kubenswrapper[4716]: E1209 16:34:25.364093 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:34:25 crc kubenswrapper[4716]: E1209 16:34:25.365676 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:34:39 crc kubenswrapper[4716]: E1209 16:34:39.218751 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:34:39 crc kubenswrapper[4716]: E1209 16:34:39.218847 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:34:51 crc kubenswrapper[4716]: E1209 16:34:51.216187 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:34:54 crc kubenswrapper[4716]: E1209 16:34:54.216502 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:35:06 crc kubenswrapper[4716]: E1209 16:35:06.216632 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:35:07 crc kubenswrapper[4716]: E1209 16:35:07.215720 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:35:18 crc kubenswrapper[4716]: E1209 16:35:18.216477 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:35:18 crc kubenswrapper[4716]: E1209 16:35:18.219252 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:35:30 crc kubenswrapper[4716]: E1209 16:35:30.227532 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:35:30 crc kubenswrapper[4716]: E1209 16:35:30.227867 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:35:45 crc kubenswrapper[4716]: E1209 16:35:45.218733 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:35:45 crc kubenswrapper[4716]: E1209 16:35:45.218757 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:35:47 crc kubenswrapper[4716]: I1209 16:35:47.922054 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:35:47 crc kubenswrapper[4716]: I1209 16:35:47.922356 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:35:56 crc kubenswrapper[4716]: E1209 16:35:56.216354 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:35:56 crc kubenswrapper[4716]: E1209 16:35:56.216795 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:36:10 crc kubenswrapper[4716]: E1209 16:36:10.215763 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:36:11 crc kubenswrapper[4716]: E1209 16:36:11.217128 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:36:17 crc kubenswrapper[4716]: I1209 16:36:17.922844 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:36:17 crc kubenswrapper[4716]: I1209 16:36:17.923458 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:36:21 crc kubenswrapper[4716]: E1209 16:36:21.217378 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:36:26 crc kubenswrapper[4716]: E1209 16:36:26.216145 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:36:33 crc kubenswrapper[4716]: E1209 16:36:33.225249 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:36:41 crc kubenswrapper[4716]: E1209 16:36:41.233133 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:36:44 crc kubenswrapper[4716]: E1209 16:36:44.216658 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:36:47 crc kubenswrapper[4716]: I1209 16:36:47.921859 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:36:47 crc kubenswrapper[4716]: I1209 16:36:47.922396 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:36:47 crc kubenswrapper[4716]: I1209 16:36:47.922439 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:36:47 crc kubenswrapper[4716]: I1209 16:36:47.923326 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:36:47 crc kubenswrapper[4716]: I1209 16:36:47.923384 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" gracePeriod=600 Dec 09 16:36:48 crc kubenswrapper[4716]: E1209 16:36:48.045614 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:36:48 crc kubenswrapper[4716]: I1209 16:36:48.535351 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" exitCode=0 Dec 09 16:36:48 crc kubenswrapper[4716]: I1209 16:36:48.535408 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"} Dec 09 16:36:48 crc kubenswrapper[4716]: I1209 16:36:48.535498 4716 scope.go:117] "RemoveContainer" containerID="7c3ee6b5713fd27686aeb15dc72c9af34b2b0fc615a90f78e1715c3ca64bfecc" Dec 09 16:36:48 crc kubenswrapper[4716]: I1209 16:36:48.536372 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:36:48 crc kubenswrapper[4716]: E1209 16:36:48.536756 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:36:52 crc kubenswrapper[4716]: E1209 16:36:52.216106 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:36:59 
crc kubenswrapper[4716]: E1209 16:36:59.226172 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:37:02 crc kubenswrapper[4716]: I1209 16:37:02.214413 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:37:02 crc kubenswrapper[4716]: E1209 16:37:02.215306 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:37:04 crc kubenswrapper[4716]: E1209 16:37:04.218695 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:37:14 crc kubenswrapper[4716]: E1209 16:37:14.216599 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:37:15 crc kubenswrapper[4716]: I1209 16:37:15.213769 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:37:15 crc kubenswrapper[4716]: E1209 16:37:15.214401 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:37:15 crc kubenswrapper[4716]: E1209 16:37:15.215656 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:37:25 crc kubenswrapper[4716]: E1209 16:37:25.232946 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:37:29 crc kubenswrapper[4716]: E1209 16:37:29.216015 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:37:30 crc kubenswrapper[4716]: I1209 16:37:30.214779 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:37:30 crc kubenswrapper[4716]: E1209 16:37:30.215534 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:37:37 crc kubenswrapper[4716]: E1209 16:37:37.216707 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:37:42 crc kubenswrapper[4716]: I1209 16:37:42.471866 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:37:42 crc kubenswrapper[4716]: E1209 16:37:42.473378 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:37:43 crc kubenswrapper[4716]: E1209 16:37:43.225278 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:37:48 crc kubenswrapper[4716]: E1209 16:37:48.216649 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 16:37:48.912844 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5trr8"] Dec 09 16:37:48 crc kubenswrapper[4716]: E1209 16:37:48.913694 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="registry-server" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 16:37:48.913734 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="registry-server" Dec 09 16:37:48 crc kubenswrapper[4716]: E1209 16:37:48.913783 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="extract-content" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 
16:37:48.913789 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="extract-content" Dec 09 16:37:48 crc kubenswrapper[4716]: E1209 16:37:48.913818 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="extract-utilities" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 16:37:48.913825 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="extract-utilities" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 16:37:48.914181 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="44211914-77b6-4ea5-8b31-137e51144600" containerName="registry-server" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 16:37:48.919007 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5trr8" Dec 09 16:37:48 crc kubenswrapper[4716]: I1209 16:37:48.946336 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5trr8"] Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.081336 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-utilities\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8" Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.081445 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5fl4\" (UniqueName: \"kubernetes.io/projected/51b61926-b287-4ed7-a460-ccb15f73d7aa-kube-api-access-g5fl4\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8" Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.082488 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-catalog-content\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8" Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.184796 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-catalog-content\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8" Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.184879 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-utilities\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8" Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.184928 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5fl4\" (UniqueName: \"kubernetes.io/projected/51b61926-b287-4ed7-a460-ccb15f73d7aa-kube-api-access-g5fl4\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8" 
Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.185614 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-catalog-content\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:37:49 crc kubenswrapper[4716]: I1209 16:37:49.185891 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-utilities\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:37:53 crc kubenswrapper[4716]: I1209 16:37:53.223607 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"
Dec 09 16:37:53 crc kubenswrapper[4716]: E1209 16:37:53.224748 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.093643 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6kmv6/must-gather-8b94b"]
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.108935 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.112745 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6kmv6"/"openshift-service-ca.crt"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.119575 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6kmv6"/"kube-root-ca.crt"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.220883 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bxtk\" (UniqueName: \"kubernetes.io/projected/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-kube-api-access-4bxtk\") pod \"must-gather-8b94b\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") " pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.220954 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-must-gather-output\") pod \"must-gather-8b94b\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") " pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.225464 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6kmv6/must-gather-8b94b"]
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.327502 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bxtk\" (UniqueName: \"kubernetes.io/projected/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-kube-api-access-4bxtk\") pod \"must-gather-8b94b\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") " pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.327562 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-must-gather-output\") pod \"must-gather-8b94b\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") " pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:37:54 crc kubenswrapper[4716]: I1209 16:37:54.331479 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-must-gather-output\") pod \"must-gather-8b94b\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") " pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:37:58 crc kubenswrapper[4716]: E1209 16:37:58.217136 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:38:01 crc kubenswrapper[4716]: E1209 16:38:01.216040 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:38:02 crc kubenswrapper[4716]: I1209 16:38:02.408333 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bxtk\" (UniqueName: \"kubernetes.io/projected/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-kube-api-access-4bxtk\") pod \"must-gather-8b94b\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") " pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:38:02 crc kubenswrapper[4716]: I1209 16:38:02.409906 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5fl4\" (UniqueName: \"kubernetes.io/projected/51b61926-b287-4ed7-a460-ccb15f73d7aa-kube-api-access-g5fl4\") pod \"community-operators-5trr8\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") " pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:02 crc kubenswrapper[4716]: I1209 16:38:02.447970 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:02 crc kubenswrapper[4716]: I1209 16:38:02.456927 4716 trace.go:236] Trace[1978381833]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-compactor-0" (09-Dec-2025 16:37:43.009) (total time: 19446ms):
Dec 09 16:38:02 crc kubenswrapper[4716]: Trace[1978381833]: [19.446927143s] [19.446927143s] END
Dec 09 16:38:02 crc kubenswrapper[4716]: I1209 16:38:02.457021 4716 trace.go:236] Trace[1831481892]: "Calculate volume metrics of storage for pod minio-dev/minio" (09-Dec-2025 16:37:49.407) (total time: 13049ms):
Dec 09 16:38:02 crc kubenswrapper[4716]: Trace[1831481892]: [13.049693129s] [13.049693129s] END
Dec 09 16:38:02 crc kubenswrapper[4716]: I1209 16:38:02.580834 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:38:03 crc kubenswrapper[4716]: I1209 16:38:03.280136 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6kmv6/must-gather-8b94b"]
Dec 09 16:38:03 crc kubenswrapper[4716]: I1209 16:38:03.339959 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/must-gather-8b94b" event={"ID":"32e41e7d-e621-42a2-a0e3-1e7d816dbe22","Type":"ContainerStarted","Data":"eef441c4688ef7822ffcef131dee651094106ad99b03bde8f654e470d00edc53"}
Dec 09 16:38:03 crc kubenswrapper[4716]: I1209 16:38:03.501381 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5trr8"]
Dec 09 16:38:04 crc kubenswrapper[4716]: I1209 16:38:04.384776 4716 generic.go:334] "Generic (PLEG): container finished" podID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerID="100ebcc497c8e2cf00006820a6014306721f5fa5ef6de089729ab363558894bb" exitCode=0
Dec 09 16:38:04 crc kubenswrapper[4716]: I1209 16:38:04.385077 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerDied","Data":"100ebcc497c8e2cf00006820a6014306721f5fa5ef6de089729ab363558894bb"}
Dec 09 16:38:04 crc kubenswrapper[4716]: I1209 16:38:04.385111 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerStarted","Data":"37d83b90035785349e2d81fe0234d8899a643f76e406c4aa19d8bb47ddbc4cbe"}
Dec 09 16:38:05 crc kubenswrapper[4716]: I1209 16:38:05.214990 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"
Dec 09 16:38:05 crc kubenswrapper[4716]: E1209 16:38:05.215941 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:38:06 crc kubenswrapper[4716]: I1209 16:38:06.415517 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerStarted","Data":"c7433b618f79090fe26c0eaaef003d9b463ab5c2d2f3ca7f219e17c611a8c2d9"}
Dec 09 16:38:07 crc kubenswrapper[4716]: I1209 16:38:07.427409 4716 generic.go:334] "Generic (PLEG): container finished" podID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerID="c7433b618f79090fe26c0eaaef003d9b463ab5c2d2f3ca7f219e17c611a8c2d9" exitCode=0
Dec 09 16:38:07 crc kubenswrapper[4716]: I1209 16:38:07.427476 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerDied","Data":"c7433b618f79090fe26c0eaaef003d9b463ab5c2d2f3ca7f219e17c611a8c2d9"}
Dec 09 16:38:09 crc kubenswrapper[4716]: I1209 16:38:09.453038 4716 generic.go:334] "Generic (PLEG): container finished" podID="0fd09ac5-846e-4895-a05f-0c39613f2ea8" containerID="0ea1807999f8dbc5821e3a4759b6b806a56da2f2eee1029846a019710e95518f" exitCode=2
Dec 09 16:38:09 crc kubenswrapper[4716]: I1209 16:38:09.453137 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz" event={"ID":"0fd09ac5-846e-4895-a05f-0c39613f2ea8","Type":"ContainerDied","Data":"0ea1807999f8dbc5821e3a4759b6b806a56da2f2eee1029846a019710e95518f"}
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.655994 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.815604 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-inventory\") pod \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") "
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.816053 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5jfg\" (UniqueName: \"kubernetes.io/projected/0fd09ac5-846e-4895-a05f-0c39613f2ea8-kube-api-access-h5jfg\") pod \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") "
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.816659 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-ssh-key\") pod \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\" (UID: \"0fd09ac5-846e-4895-a05f-0c39613f2ea8\") "
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.824214 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fd09ac5-846e-4895-a05f-0c39613f2ea8-kube-api-access-h5jfg" (OuterVolumeSpecName: "kube-api-access-h5jfg") pod "0fd09ac5-846e-4895-a05f-0c39613f2ea8" (UID: "0fd09ac5-846e-4895-a05f-0c39613f2ea8"). InnerVolumeSpecName "kube-api-access-h5jfg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.857894 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-inventory" (OuterVolumeSpecName: "inventory") pod "0fd09ac5-846e-4895-a05f-0c39613f2ea8" (UID: "0fd09ac5-846e-4895-a05f-0c39613f2ea8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.885735 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0fd09ac5-846e-4895-a05f-0c39613f2ea8" (UID: "0fd09ac5-846e-4895-a05f-0c39613f2ea8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.920806 4716 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.920849 4716 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fd09ac5-846e-4895-a05f-0c39613f2ea8-inventory\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:11 crc kubenswrapper[4716]: I1209 16:38:11.920860 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5jfg\" (UniqueName: \"kubernetes.io/projected/0fd09ac5-846e-4895-a05f-0c39613f2ea8-kube-api-access-h5jfg\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.557919 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz" event={"ID":"0fd09ac5-846e-4895-a05f-0c39613f2ea8","Type":"ContainerDied","Data":"718ca07ab16d646ca15df7b0daab0814d42be070d2870eb5627b1b38485dd8f8"}
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.558249 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="718ca07ab16d646ca15df7b0daab0814d42be070d2870eb5627b1b38485dd8f8"
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.557954 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-cdztz"
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.560406 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerStarted","Data":"3efa5501f24ef0d367ab599aa7e2c48dd90fb083f7fe9f735f83cbcbbca4bcf1"}
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.564842 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/must-gather-8b94b" event={"ID":"32e41e7d-e621-42a2-a0e3-1e7d816dbe22","Type":"ContainerStarted","Data":"004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9"}
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.564901 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/must-gather-8b94b" event={"ID":"32e41e7d-e621-42a2-a0e3-1e7d816dbe22","Type":"ContainerStarted","Data":"87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"}
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.589671 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5trr8" podStartSLOduration=17.704167192 podStartE2EDuration="24.589593944s" podCreationTimestamp="2025-12-09 16:37:48 +0000 UTC" firstStartedPulling="2025-12-09 16:38:04.396694919 +0000 UTC m=+5371.551438907" lastFinishedPulling="2025-12-09 16:38:11.282121671 +0000 UTC m=+5378.436865659" observedRunningTime="2025-12-09 16:38:12.583546072 +0000 UTC m=+5379.738290060" watchObservedRunningTime="2025-12-09 16:38:12.589593944 +0000 UTC m=+5379.744337932"
Dec 09 16:38:12 crc kubenswrapper[4716]: I1209 16:38:12.618954 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6kmv6/must-gather-8b94b" podStartSLOduration=10.466930653 podStartE2EDuration="18.618925717s" podCreationTimestamp="2025-12-09 16:37:54 +0000 UTC" firstStartedPulling="2025-12-09 16:38:03.240022749 +0000 UTC m=+5370.394766737" lastFinishedPulling="2025-12-09 16:38:11.392017813 +0000 UTC m=+5378.546761801" observedRunningTime="2025-12-09 16:38:12.613115542 +0000 UTC m=+5379.767859550" watchObservedRunningTime="2025-12-09 16:38:12.618925717 +0000 UTC m=+5379.773669705"
Dec 09 16:38:13 crc kubenswrapper[4716]: E1209 16:38:13.226213 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:38:16 crc kubenswrapper[4716]: E1209 16:38:16.216669 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.031223 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6kmv6/crc-debug-n8gxh"]
Dec 09 16:38:17 crc kubenswrapper[4716]: E1209 16:38:17.032046 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fd09ac5-846e-4895-a05f-0c39613f2ea8" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.032068 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fd09ac5-846e-4895-a05f-0c39613f2ea8" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.032379 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fd09ac5-846e-4895-a05f-0c39613f2ea8" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.033309 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.036107 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6kmv6"/"default-dockercfg-zpwdf"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.164318 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwzcl\" (UniqueName: \"kubernetes.io/projected/17e7570d-2647-4d23-a88d-9f11f27b465b-kube-api-access-rwzcl\") pod \"crc-debug-n8gxh\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") " pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.164647 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/17e7570d-2647-4d23-a88d-9f11f27b465b-host\") pod \"crc-debug-n8gxh\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") " pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.214175 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"
Dec 09 16:38:17 crc kubenswrapper[4716]: E1209 16:38:17.214486 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.267843 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/17e7570d-2647-4d23-a88d-9f11f27b465b-host\") pod \"crc-debug-n8gxh\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") " pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.268179 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwzcl\" (UniqueName: \"kubernetes.io/projected/17e7570d-2647-4d23-a88d-9f11f27b465b-kube-api-access-rwzcl\") pod \"crc-debug-n8gxh\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") " pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.268857 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/17e7570d-2647-4d23-a88d-9f11f27b465b-host\") pod \"crc-debug-n8gxh\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") " pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.289814 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwzcl\" (UniqueName: \"kubernetes.io/projected/17e7570d-2647-4d23-a88d-9f11f27b465b-kube-api-access-rwzcl\") pod \"crc-debug-n8gxh\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") " pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.352942 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:17 crc kubenswrapper[4716]: W1209 16:38:17.399615 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17e7570d_2647_4d23_a88d_9f11f27b465b.slice/crio-cceeeb3139e39d501e90f997d191312dd61fe5a2a74bae5846c330bcc3eaaa7f WatchSource:0}: Error finding container cceeeb3139e39d501e90f997d191312dd61fe5a2a74bae5846c330bcc3eaaa7f: Status 404 returned error can't find the container with id cceeeb3139e39d501e90f997d191312dd61fe5a2a74bae5846c330bcc3eaaa7f
Dec 09 16:38:17 crc kubenswrapper[4716]: I1209 16:38:17.621257 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh" event={"ID":"17e7570d-2647-4d23-a88d-9f11f27b465b","Type":"ContainerStarted","Data":"cceeeb3139e39d501e90f997d191312dd61fe5a2a74bae5846c330bcc3eaaa7f"}
Dec 09 16:38:22 crc kubenswrapper[4716]: I1209 16:38:22.448864 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:22 crc kubenswrapper[4716]: I1209 16:38:22.449484 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:22 crc kubenswrapper[4716]: I1209 16:38:22.533889 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:22 crc kubenswrapper[4716]: I1209 16:38:22.735266 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:22 crc kubenswrapper[4716]: I1209 16:38:22.794916 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5trr8"]
Dec 09 16:38:24 crc kubenswrapper[4716]: I1209 16:38:24.743987 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5trr8" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="registry-server" containerID="cri-o://3efa5501f24ef0d367ab599aa7e2c48dd90fb083f7fe9f735f83cbcbbca4bcf1" gracePeriod=2
Dec 09 16:38:24 crc kubenswrapper[4716]: E1209 16:38:24.932040 4716 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51b61926_b287_4ed7_a460_ccb15f73d7aa.slice/crio-conmon-3efa5501f24ef0d367ab599aa7e2c48dd90fb083f7fe9f735f83cbcbbca4bcf1.scope\": RecentStats: unable to find data in memory cache]"
Dec 09 16:38:25 crc kubenswrapper[4716]: I1209 16:38:25.764370 4716 generic.go:334] "Generic (PLEG): container finished" podID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerID="3efa5501f24ef0d367ab599aa7e2c48dd90fb083f7fe9f735f83cbcbbca4bcf1" exitCode=0
Dec 09 16:38:25 crc kubenswrapper[4716]: I1209 16:38:25.764438 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerDied","Data":"3efa5501f24ef0d367ab599aa7e2c48dd90fb083f7fe9f735f83cbcbbca4bcf1"}
Dec 09 16:38:28 crc kubenswrapper[4716]: E1209 16:38:28.218121 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.167840 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.217689 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"
Dec 09 16:38:31 crc kubenswrapper[4716]: E1209 16:38:31.218235 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:38:31 crc kubenswrapper[4716]: E1209 16:38:31.219116 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.335050 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5fl4\" (UniqueName: \"kubernetes.io/projected/51b61926-b287-4ed7-a460-ccb15f73d7aa-kube-api-access-g5fl4\") pod \"51b61926-b287-4ed7-a460-ccb15f73d7aa\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") "
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.335384 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-catalog-content\") pod \"51b61926-b287-4ed7-a460-ccb15f73d7aa\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") "
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.335468 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-utilities\") pod \"51b61926-b287-4ed7-a460-ccb15f73d7aa\" (UID: \"51b61926-b287-4ed7-a460-ccb15f73d7aa\") "
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.336476 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-utilities" (OuterVolumeSpecName: "utilities") pod "51b61926-b287-4ed7-a460-ccb15f73d7aa" (UID: "51b61926-b287-4ed7-a460-ccb15f73d7aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.370862 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51b61926-b287-4ed7-a460-ccb15f73d7aa-kube-api-access-g5fl4" (OuterVolumeSpecName: "kube-api-access-g5fl4") pod "51b61926-b287-4ed7-a460-ccb15f73d7aa" (UID: "51b61926-b287-4ed7-a460-ccb15f73d7aa"). InnerVolumeSpecName "kube-api-access-g5fl4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.411762 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51b61926-b287-4ed7-a460-ccb15f73d7aa" (UID: "51b61926-b287-4ed7-a460-ccb15f73d7aa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.438970 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5fl4\" (UniqueName: \"kubernetes.io/projected/51b61926-b287-4ed7-a460-ccb15f73d7aa-kube-api-access-g5fl4\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.439016 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.439028 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51b61926-b287-4ed7-a460-ccb15f73d7aa-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.841641 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5trr8" event={"ID":"51b61926-b287-4ed7-a460-ccb15f73d7aa","Type":"ContainerDied","Data":"37d83b90035785349e2d81fe0234d8899a643f76e406c4aa19d8bb47ddbc4cbe"}
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.841683 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5trr8"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.842043 4716 scope.go:117] "RemoveContainer" containerID="3efa5501f24ef0d367ab599aa7e2c48dd90fb083f7fe9f735f83cbcbbca4bcf1"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.846969 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh" event={"ID":"17e7570d-2647-4d23-a88d-9f11f27b465b","Type":"ContainerStarted","Data":"c4540e925e0308a1a9fb8cb88505378819683762b733002e41a631e438dcff11"}
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.866801 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh" podStartSLOduration=1.506876692 podStartE2EDuration="14.866777974s" podCreationTimestamp="2025-12-09 16:38:17 +0000 UTC" firstStartedPulling="2025-12-09 16:38:17.402463268 +0000 UTC m=+5384.557207256" lastFinishedPulling="2025-12-09 16:38:30.76236455 +0000 UTC m=+5397.917108538" observedRunningTime="2025-12-09 16:38:31.86380866 +0000 UTC m=+5399.018552648" watchObservedRunningTime="2025-12-09 16:38:31.866777974 +0000 UTC m=+5399.021521962"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.867865 4716 scope.go:117] "RemoveContainer" containerID="c7433b618f79090fe26c0eaaef003d9b463ab5c2d2f3ca7f219e17c611a8c2d9"
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.893663 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5trr8"]
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.905126 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5trr8"]
Dec 09 16:38:31 crc kubenswrapper[4716]: I1209 16:38:31.910777 4716 scope.go:117] "RemoveContainer" containerID="100ebcc497c8e2cf00006820a6014306721f5fa5ef6de089729ab363558894bb"
Dec 09 16:38:33 crc kubenswrapper[4716]: I1209 16:38:33.272250 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" path="/var/lib/kubelet/pods/51b61926-b287-4ed7-a460-ccb15f73d7aa/volumes"
Dec 09 16:38:42 crc kubenswrapper[4716]: E1209 16:38:42.245094 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.214765 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"
Dec 09 16:38:44 crc kubenswrapper[4716]: E1209 16:38:44.215455 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:38:44 crc kubenswrapper[4716]: E1209 16:38:44.216272 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.844999 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6jjmj"]
Dec 09 16:38:44 crc kubenswrapper[4716]: E1209 16:38:44.845548 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="extract-utilities"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.845571 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="extract-utilities"
Dec 09 16:38:44 crc kubenswrapper[4716]: E1209 16:38:44.845643 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="registry-server"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.845653 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="registry-server"
Dec 09 16:38:44 crc kubenswrapper[4716]: E1209 16:38:44.845674 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="extract-content"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.845681 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="extract-content"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.846018 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="51b61926-b287-4ed7-a460-ccb15f73d7aa" containerName="registry-server"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.848237 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:44 crc kubenswrapper[4716]: I1209 16:38:44.866757 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6jjmj"]
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.005394 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw96d\" (UniqueName: \"kubernetes.io/projected/4c28c8a3-4208-4879-aba0-2c929c836566-kube-api-access-cw96d\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.005609 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-catalog-content\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.006106 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-utilities\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.108851 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-utilities\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.109011 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw96d\" (UniqueName: \"kubernetes.io/projected/4c28c8a3-4208-4879-aba0-2c929c836566-kube-api-access-cw96d\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.109093 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-catalog-content\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.109521 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-utilities\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.109542 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-catalog-content\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.132478 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw96d\" (UniqueName: \"kubernetes.io/projected/4c28c8a3-4208-4879-aba0-2c929c836566-kube-api-access-cw96d\") pod \"redhat-marketplace-6jjmj\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:45 crc kubenswrapper[4716]: I1209 16:38:45.208732 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:46 crc kubenswrapper[4716]: I1209 16:38:46.819237 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6jjmj"]
Dec 09 16:38:47 crc kubenswrapper[4716]: I1209 16:38:47.023751 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6jjmj" event={"ID":"4c28c8a3-4208-4879-aba0-2c929c836566","Type":"ContainerStarted","Data":"51c74a5a5c6c754ec5452ebb93dca37fc8baae92bff934da1418811bc7454abd"}
Dec 09 16:38:48 crc kubenswrapper[4716]: I1209 16:38:48.038695 4716 generic.go:334] "Generic (PLEG): container finished" podID="4c28c8a3-4208-4879-aba0-2c929c836566" containerID="38b7a8325c8703aef9bf79040d01c29e0243264b1cc33c937e4bbf149d07bf64" exitCode=0
Dec 09 16:38:48 crc kubenswrapper[4716]: I1209 16:38:48.038812 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6jjmj" event={"ID":"4c28c8a3-4208-4879-aba0-2c929c836566","Type":"ContainerDied","Data":"38b7a8325c8703aef9bf79040d01c29e0243264b1cc33c937e4bbf149d07bf64"}
Dec 09 16:38:50 crc kubenswrapper[4716]: I1209 16:38:50.069460 4716 generic.go:334] "Generic (PLEG): container finished" podID="4c28c8a3-4208-4879-aba0-2c929c836566" containerID="267d91d9456a314da8f38aef9ab3db235930db5e0538bde60d66ea1f46440e4c" exitCode=0
Dec 09 16:38:50 crc kubenswrapper[4716]: I1209 16:38:50.069578 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6jjmj" event={"ID":"4c28c8a3-4208-4879-aba0-2c929c836566","Type":"ContainerDied","Data":"267d91d9456a314da8f38aef9ab3db235930db5e0538bde60d66ea1f46440e4c"}
Dec 09 16:38:52 crc kubenswrapper[4716]: I1209 16:38:52.097131 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6jjmj" event={"ID":"4c28c8a3-4208-4879-aba0-2c929c836566","Type":"ContainerStarted","Data":"a539425ea77b410dd1b70cf978037eeddbfbbc4ea57dc532ce2cde3452aad438"}
Dec 09 16:38:52 crc kubenswrapper[4716]: I1209 16:38:52.121168 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6jjmj" podStartSLOduration=4.368104048 podStartE2EDuration="8.121143535s" podCreationTimestamp="2025-12-09 16:38:44 +0000 UTC" firstStartedPulling="2025-12-09 16:38:48.040748198 +0000 UTC m=+5415.195492186" lastFinishedPulling="2025-12-09 16:38:51.793787685 +0000 UTC m=+5418.948531673" observedRunningTime="2025-12-09 16:38:52.116010699 +0000 UTC m=+5419.270754687" watchObservedRunningTime="2025-12-09 16:38:52.121143535 +0000 UTC m=+5419.275887523"
Dec 09 16:38:54 crc kubenswrapper[4716]: E1209 16:38:54.216405 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:38:55 crc kubenswrapper[4716]: I1209 16:38:55.209062 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:55 crc kubenswrapper[4716]: I1209 16:38:55.209409 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6jjmj"
Dec 09 16:38:55 crc kubenswrapper[4716]: I1209 16:38:55.214231 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244"
Dec 09 16:38:55 crc kubenswrapper[4716]: E1209 16:38:55.214884 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:38:56 crc kubenswrapper[4716]: I1209 16:38:56.271966 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-6jjmj" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="registry-server" probeResult="failure" output=<
Dec 09 16:38:56 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s
Dec 09 16:38:56 crc kubenswrapper[4716]: >
Dec 09 16:38:57 crc kubenswrapper[4716]: I1209 16:38:57.153582 4716 generic.go:334] "Generic (PLEG): container finished" podID="17e7570d-2647-4d23-a88d-9f11f27b465b" containerID="c4540e925e0308a1a9fb8cb88505378819683762b733002e41a631e438dcff11" exitCode=0
Dec 09 16:38:57 crc kubenswrapper[4716]: I1209 16:38:57.153673 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh" event={"ID":"17e7570d-2647-4d23-a88d-9f11f27b465b","Type":"ContainerDied","Data":"c4540e925e0308a1a9fb8cb88505378819683762b733002e41a631e438dcff11"}
Dec 09 16:38:58 crc kubenswrapper[4716]: E1209 16:38:58.217843 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.333888 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh"
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.374168 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6kmv6/crc-debug-n8gxh"]
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.388187 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6kmv6/crc-debug-n8gxh"]
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.450857 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwzcl\" (UniqueName: \"kubernetes.io/projected/17e7570d-2647-4d23-a88d-9f11f27b465b-kube-api-access-rwzcl\") pod \"17e7570d-2647-4d23-a88d-9f11f27b465b\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") "
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.451197 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/17e7570d-2647-4d23-a88d-9f11f27b465b-host\") pod \"17e7570d-2647-4d23-a88d-9f11f27b465b\" (UID: \"17e7570d-2647-4d23-a88d-9f11f27b465b\") "
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.451805 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/17e7570d-2647-4d23-a88d-9f11f27b465b-host" (OuterVolumeSpecName: "host") pod "17e7570d-2647-4d23-a88d-9f11f27b465b" (UID: "17e7570d-2647-4d23-a88d-9f11f27b465b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.452335 4716 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/17e7570d-2647-4d23-a88d-9f11f27b465b-host\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.459833 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17e7570d-2647-4d23-a88d-9f11f27b465b-kube-api-access-rwzcl" (OuterVolumeSpecName: "kube-api-access-rwzcl") pod "17e7570d-2647-4d23-a88d-9f11f27b465b" (UID: "17e7570d-2647-4d23-a88d-9f11f27b465b"). InnerVolumeSpecName "kube-api-access-rwzcl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:38:58 crc kubenswrapper[4716]: I1209 16:38:58.555149 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwzcl\" (UniqueName: \"kubernetes.io/projected/17e7570d-2647-4d23-a88d-9f11f27b465b-kube-api-access-rwzcl\") on node \"crc\" DevicePath \"\""
Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.198366 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cceeeb3139e39d501e90f997d191312dd61fe5a2a74bae5846c330bcc3eaaa7f"
Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.198466 4716 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-n8gxh" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.228294 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17e7570d-2647-4d23-a88d-9f11f27b465b" path="/var/lib/kubelet/pods/17e7570d-2647-4d23-a88d-9f11f27b465b/volumes" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.648751 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6kmv6/crc-debug-wl6x6"] Dec 09 16:38:59 crc kubenswrapper[4716]: E1209 16:38:59.649735 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17e7570d-2647-4d23-a88d-9f11f27b465b" containerName="container-00" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.649759 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="17e7570d-2647-4d23-a88d-9f11f27b465b" containerName="container-00" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.650137 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="17e7570d-2647-4d23-a88d-9f11f27b465b" containerName="container-00" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.651224 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.653528 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6kmv6"/"default-dockercfg-zpwdf" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.783784 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc8f2\" (UniqueName: \"kubernetes.io/projected/104cf567-6f5a-40f3-b157-8f43300a8492-kube-api-access-zc8f2\") pod \"crc-debug-wl6x6\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.784482 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/104cf567-6f5a-40f3-b157-8f43300a8492-host\") pod \"crc-debug-wl6x6\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.887093 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc8f2\" (UniqueName: \"kubernetes.io/projected/104cf567-6f5a-40f3-b157-8f43300a8492-kube-api-access-zc8f2\") pod \"crc-debug-wl6x6\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.887401 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/104cf567-6f5a-40f3-b157-8f43300a8492-host\") pod \"crc-debug-wl6x6\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.887491 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/104cf567-6f5a-40f3-b157-8f43300a8492-host\") pod \"crc-debug-wl6x6\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.909722 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc8f2\" (UniqueName: 
\"kubernetes.io/projected/104cf567-6f5a-40f3-b157-8f43300a8492-kube-api-access-zc8f2\") pod \"crc-debug-wl6x6\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:38:59 crc kubenswrapper[4716]: I1209 16:38:59.972376 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:39:00 crc kubenswrapper[4716]: W1209 16:39:00.023649 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod104cf567_6f5a_40f3_b157_8f43300a8492.slice/crio-6d522f811c93cbce94379904b706d7594f9ec8c99d0611895bc6958cfe7c5b67 WatchSource:0}: Error finding container 6d522f811c93cbce94379904b706d7594f9ec8c99d0611895bc6958cfe7c5b67: Status 404 returned error can't find the container with id 6d522f811c93cbce94379904b706d7594f9ec8c99d0611895bc6958cfe7c5b67 Dec 09 16:39:00 crc kubenswrapper[4716]: I1209 16:39:00.211398 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" event={"ID":"104cf567-6f5a-40f3-b157-8f43300a8492","Type":"ContainerStarted","Data":"6d522f811c93cbce94379904b706d7594f9ec8c99d0611895bc6958cfe7c5b67"} Dec 09 16:39:01 crc kubenswrapper[4716]: I1209 16:39:01.245248 4716 generic.go:334] "Generic (PLEG): container finished" podID="104cf567-6f5a-40f3-b157-8f43300a8492" containerID="cff89d62734894deb3cbf9afd3abaec574af6e63c8dce71b1d1a9e43fa9b0163" exitCode=1 Dec 09 16:39:01 crc kubenswrapper[4716]: I1209 16:39:01.248855 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" event={"ID":"104cf567-6f5a-40f3-b157-8f43300a8492","Type":"ContainerDied","Data":"cff89d62734894deb3cbf9afd3abaec574af6e63c8dce71b1d1a9e43fa9b0163"} Dec 09 16:39:01 crc kubenswrapper[4716]: I1209 16:39:01.308330 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6kmv6/crc-debug-wl6x6"] Dec 09 16:39:01 crc kubenswrapper[4716]: I1209 16:39:01.323039 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6kmv6/crc-debug-wl6x6"] Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.409146 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.449260 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/104cf567-6f5a-40f3-b157-8f43300a8492-host\") pod \"104cf567-6f5a-40f3-b157-8f43300a8492\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.449409 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/104cf567-6f5a-40f3-b157-8f43300a8492-host" (OuterVolumeSpecName: "host") pod "104cf567-6f5a-40f3-b157-8f43300a8492" (UID: "104cf567-6f5a-40f3-b157-8f43300a8492"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.449748 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc8f2\" (UniqueName: \"kubernetes.io/projected/104cf567-6f5a-40f3-b157-8f43300a8492-kube-api-access-zc8f2\") pod \"104cf567-6f5a-40f3-b157-8f43300a8492\" (UID: \"104cf567-6f5a-40f3-b157-8f43300a8492\") " Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.450707 4716 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/104cf567-6f5a-40f3-b157-8f43300a8492-host\") on node \"crc\" DevicePath \"\"" Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.466523 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/104cf567-6f5a-40f3-b157-8f43300a8492-kube-api-access-zc8f2" (OuterVolumeSpecName: "kube-api-access-zc8f2") pod "104cf567-6f5a-40f3-b157-8f43300a8492" (UID: "104cf567-6f5a-40f3-b157-8f43300a8492"). InnerVolumeSpecName "kube-api-access-zc8f2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:39:02 crc kubenswrapper[4716]: I1209 16:39:02.552692 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc8f2\" (UniqueName: \"kubernetes.io/projected/104cf567-6f5a-40f3-b157-8f43300a8492-kube-api-access-zc8f2\") on node \"crc\" DevicePath \"\"" Dec 09 16:39:03 crc kubenswrapper[4716]: I1209 16:39:03.256585 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="104cf567-6f5a-40f3-b157-8f43300a8492" path="/var/lib/kubelet/pods/104cf567-6f5a-40f3-b157-8f43300a8492/volumes" Dec 09 16:39:03 crc kubenswrapper[4716]: I1209 16:39:03.268039 4716 scope.go:117] "RemoveContainer" containerID="cff89d62734894deb3cbf9afd3abaec574af6e63c8dce71b1d1a9e43fa9b0163" Dec 09 16:39:03 crc kubenswrapper[4716]: I1209 16:39:03.268157 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6kmv6/crc-debug-wl6x6" Dec 09 16:39:05 crc kubenswrapper[4716]: I1209 16:39:05.269092 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6jjmj" Dec 09 16:39:05 crc kubenswrapper[4716]: I1209 16:39:05.320606 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6jjmj" Dec 09 16:39:05 crc kubenswrapper[4716]: I1209 16:39:05.507694 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6jjmj"] Dec 09 16:39:06 crc kubenswrapper[4716]: I1209 16:39:06.316591 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6jjmj" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="registry-server" containerID="cri-o://a539425ea77b410dd1b70cf978037eeddbfbbc4ea57dc532ce2cde3452aad438" gracePeriod=2 Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.329419 4716 generic.go:334] "Generic (PLEG): container finished" podID="4c28c8a3-4208-4879-aba0-2c929c836566" containerID="a539425ea77b410dd1b70cf978037eeddbfbbc4ea57dc532ce2cde3452aad438" exitCode=0 Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.329520 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6jjmj" event={"ID":"4c28c8a3-4208-4879-aba0-2c929c836566","Type":"ContainerDied","Data":"a539425ea77b410dd1b70cf978037eeddbfbbc4ea57dc532ce2cde3452aad438"} Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.856247 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6jjmj" Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.912226 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw96d\" (UniqueName: \"kubernetes.io/projected/4c28c8a3-4208-4879-aba0-2c929c836566-kube-api-access-cw96d\") pod \"4c28c8a3-4208-4879-aba0-2c929c836566\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.912483 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-utilities\") pod \"4c28c8a3-4208-4879-aba0-2c929c836566\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.912517 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-catalog-content\") pod \"4c28c8a3-4208-4879-aba0-2c929c836566\" (UID: \"4c28c8a3-4208-4879-aba0-2c929c836566\") " Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.913228 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-utilities" (OuterVolumeSpecName: "utilities") pod "4c28c8a3-4208-4879-aba0-2c929c836566" (UID: "4c28c8a3-4208-4879-aba0-2c929c836566"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.921119 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c28c8a3-4208-4879-aba0-2c929c836566-kube-api-access-cw96d" (OuterVolumeSpecName: "kube-api-access-cw96d") pod "4c28c8a3-4208-4879-aba0-2c929c836566" (UID: "4c28c8a3-4208-4879-aba0-2c929c836566"). InnerVolumeSpecName "kube-api-access-cw96d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:39:07 crc kubenswrapper[4716]: I1209 16:39:07.946328 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c28c8a3-4208-4879-aba0-2c929c836566" (UID: "4c28c8a3-4208-4879-aba0-2c929c836566"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.016265 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw96d\" (UniqueName: \"kubernetes.io/projected/4c28c8a3-4208-4879-aba0-2c929c836566-kube-api-access-cw96d\") on node \"crc\" DevicePath \"\"" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.016306 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.016317 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c28c8a3-4208-4879-aba0-2c929c836566-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.340850 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6jjmj" event={"ID":"4c28c8a3-4208-4879-aba0-2c929c836566","Type":"ContainerDied","Data":"51c74a5a5c6c754ec5452ebb93dca37fc8baae92bff934da1418811bc7454abd"} Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.340902 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6jjmj" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.340954 4716 scope.go:117] "RemoveContainer" containerID="a539425ea77b410dd1b70cf978037eeddbfbbc4ea57dc532ce2cde3452aad438" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.366666 4716 scope.go:117] "RemoveContainer" containerID="267d91d9456a314da8f38aef9ab3db235930db5e0538bde60d66ea1f46440e4c" Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.380465 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6jjmj"] Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.391334 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6jjmj"] Dec 09 16:39:08 crc kubenswrapper[4716]: I1209 16:39:08.827666 4716 scope.go:117] "RemoveContainer" containerID="38b7a8325c8703aef9bf79040d01c29e0243264b1cc33c937e4bbf149d07bf64" Dec 09 16:39:09 crc kubenswrapper[4716]: I1209 16:39:09.214099 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:39:09 crc kubenswrapper[4716]: E1209 16:39:09.214858 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:39:09 crc kubenswrapper[4716]: E1209 16:39:09.215696 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:39:09 crc kubenswrapper[4716]: I1209 16:39:09.244514 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" path="/var/lib/kubelet/pods/4c28c8a3-4208-4879-aba0-2c929c836566/volumes" Dec 09 16:39:12 crc kubenswrapper[4716]: I1209 16:39:12.216562 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 16:39:12 crc kubenswrapper[4716]: E1209 16:39:12.341041 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:39:12 crc kubenswrapper[4716]: E1209 16:39:12.341149 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:39:12 crc kubenswrapper[4716]: E1209 16:39:12.341370 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:39:12 crc kubenswrapper[4716]: E1209 16:39:12.342790 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:39:20 crc kubenswrapper[4716]: E1209 16:39:20.218790 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:39:23 crc kubenswrapper[4716]: E1209 16:39:23.224381 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:39:24 crc kubenswrapper[4716]: I1209 16:39:24.213665 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:39:24 crc kubenswrapper[4716]: E1209 16:39:24.214395 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:39:35 crc kubenswrapper[4716]: E1209 16:39:35.218047 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:39:35 crc kubenswrapper[4716]: E1209 16:39:35.335676 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:39:35 crc kubenswrapper[4716]: E1209 16:39:35.335991 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 09 16:39:35 crc kubenswrapper[4716]: E1209 16:39:35.336231 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 09 16:39:35 crc kubenswrapper[4716]: E1209 16:39:35.337511 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:39:39 crc kubenswrapper[4716]: I1209 16:39:39.214372 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:39:39 crc kubenswrapper[4716]: E1209 16:39:39.216504 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:39:47 crc kubenswrapper[4716]: E1209 16:39:47.215943 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:39:47 crc kubenswrapper[4716]: E1209 16:39:47.216096 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:39:50 crc kubenswrapper[4716]: I1209 16:39:50.213733 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:39:50 crc kubenswrapper[4716]: E1209 16:39:50.214505 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:39:50 crc kubenswrapper[4716]: I1209 16:39:50.700428 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81faf0e1-4428-4ed2-8d1f-98d0fd11bc40/aodh-api/0.log" Dec 09 16:39:50 crc kubenswrapper[4716]: I1209 16:39:50.735412 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81faf0e1-4428-4ed2-8d1f-98d0fd11bc40/aodh-evaluator/0.log" Dec 09 16:39:50 crc kubenswrapper[4716]: I1209 16:39:50.904438 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81faf0e1-4428-4ed2-8d1f-98d0fd11bc40/aodh-listener/0.log" Dec 09 16:39:50 crc kubenswrapper[4716]: I1209 16:39:50.967903 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81faf0e1-4428-4ed2-8d1f-98d0fd11bc40/aodh-notifier/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.009414 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-64c8798578-khfkb_d52e1a64-3a20-424c-8e65-bcb50625259a/barbican-api/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.113005 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-64c8798578-khfkb_d52e1a64-3a20-424c-8e65-bcb50625259a/barbican-api-log/0.log" Dec 09 16:39:51 crc 
kubenswrapper[4716]: I1209 16:39:51.175407 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-79554b67bb-ppw4d_af26f699-cc84-4ddf-a63a-450b6bb5cfa2/barbican-keystone-listener/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.260151 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-79554b67bb-ppw4d_af26f699-cc84-4ddf-a63a-450b6bb5cfa2/barbican-keystone-listener-log/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.385981 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5456946d99-njfkq_1a338543-0982-4a3c-ac49-49f25a321870/barbican-worker/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.454838 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-5456946d99-njfkq_1a338543-0982-4a3c-ac49-49f25a321870/barbican-worker-log/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.622387 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-hblsv_7a03690d-fbfb-4a36-9b27-ca857fcc88f1/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.759716 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_29cebb2d-8cdb-49de-a29d-1d02808e46a9/ceilometer-notification-agent/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.833455 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_29cebb2d-8cdb-49de-a29d-1d02808e46a9/proxy-httpd/0.log" Dec 09 16:39:51 crc kubenswrapper[4716]: I1209 16:39:51.858320 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_29cebb2d-8cdb-49de-a29d-1d02808e46a9/sg-core/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.036979 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ebb94c0f-cef2-4960-8b18-c073029c813c/cinder-api/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.062914 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ebb94c0f-cef2-4960-8b18-c073029c813c/cinder-api-log/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.242500 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_2343bb60-69e9-4d0d-95d3-0ecdfa36d42f/cinder-scheduler/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.298746 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_2343bb60-69e9-4d0d-95d3-0ecdfa36d42f/probe/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.369092 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-r72fn_6cd1bcab-3e4a-41f7-97a2-19818a1f3415/init/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.561200 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-r72fn_6cd1bcab-3e4a-41f7-97a2-19818a1f3415/init/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.584969 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6f6df4f56c-r72fn_6cd1bcab-3e4a-41f7-97a2-19818a1f3415/dnsmasq-dns/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.647280 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-6vkdc_ff34b1b8-b04f-4c11-8e8b-ac80b98a6f9e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.836071 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-85wnn_3f192ee3-f23a-4d2f-b423-fee621bf273e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:52 crc kubenswrapper[4716]: I1209 16:39:52.857245 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-b8znx_56391e5b-15d4-4f61-9d57-6512b28a5254/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:53 crc kubenswrapper[4716]: I1209 16:39:53.031915 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-cdztz_0fd09ac5-846e-4895-a05f-0c39613f2ea8/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:53 crc kubenswrapper[4716]: I1209 16:39:53.071212 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-hrbp4_7a560de6-cdd4-41b4-af8c-0523cca3eed0/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:53 crc kubenswrapper[4716]: I1209 16:39:53.282141 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-kp8f2_b22ffe51-8b47-48a6-9292-0577f680d56b/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:53 crc kubenswrapper[4716]: I1209 16:39:53.348873 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-mzt6l_9b993fbb-2885-48c8-83b7-9e07287e790e/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:39:54 crc kubenswrapper[4716]: I1209 16:39:54.375842 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_f3b8d20d-33fa-407b-9d95-17f9321fdf08/glance-log/0.log" Dec 09 16:39:54 crc kubenswrapper[4716]: I1209 16:39:54.418665 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_f3b8d20d-33fa-407b-9d95-17f9321fdf08/glance-httpd/0.log" Dec 09 16:39:54 crc kubenswrapper[4716]: I1209 16:39:54.467650 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_23f21e51-c98a-4198-9e0f-a4e7494c10e8/glance-httpd/0.log" Dec 09 16:39:54 crc kubenswrapper[4716]: I1209 16:39:54.593085 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_23f21e51-c98a-4198-9e0f-a4e7494c10e8/glance-log/0.log" Dec 09 16:39:55 crc kubenswrapper[4716]: I1209 16:39:55.157587 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-7b685d4d7d-v9gv5_0a60d448-8732-476b-9caa-5d47786ebcbf/heat-engine/0.log" Dec 09 16:39:55 crc kubenswrapper[4716]: I1209 16:39:55.406200 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-5757f44d58-n4z9t_a54facb6-d928-4957-a382-5f77e827b336/heat-api/0.log" Dec 09 16:39:55 crc kubenswrapper[4716]: I1209 16:39:55.436423 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29421601-2t7qc_5342520e-c878-41a3-8238-efd62938086a/keystone-cron/0.log" Dec 09 16:39:55 crc kubenswrapper[4716]: I1209 16:39:55.459837 4716 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_keystone-56fcdcb665-5z7hm_21c0272c-4b2c-41f2-8777-06d494046fb8/keystone-api/0.log" Dec 09 16:39:55 crc kubenswrapper[4716]: I1209 16:39:55.489475 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-76845fb89c-tcnr6_35e82a88-b5cc-4be0-a4b4-b654d67e103d/heat-cfnapi/0.log" Dec 09 16:39:56 crc kubenswrapper[4716]: I1209 16:39:56.080008 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_0d83e13f-52c5-4369-b2bc-ecb9df8b4baf/kube-state-metrics/0.log" Dec 09 16:39:56 crc kubenswrapper[4716]: I1209 16:39:56.373040 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_457d6079-52ef-43f8-8409-6e562a6e8f85/mysqld-exporter/0.log" Dec 09 16:39:56 crc kubenswrapper[4716]: I1209 16:39:56.461432 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-67d7f5448f-zcfm9_9d36793d-f026-47a5-a21f-1272aa1dbf58/neutron-api/0.log" Dec 09 16:39:56 crc kubenswrapper[4716]: I1209 16:39:56.491448 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-67d7f5448f-zcfm9_9d36793d-f026-47a5-a21f-1272aa1dbf58/neutron-httpd/0.log" Dec 09 16:39:56 crc kubenswrapper[4716]: I1209 16:39:56.867617 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_cc126966-2e68-43bc-a129-8f93723b78c1/nova-api-log/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.001452 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_06727d8d-6fe6-4146-87af-7cb45ea7e1e1/nova-cell0-conductor-conductor/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.223819 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_15289e69-4a3c-4832-bfc8-a2af3de4ca72/nova-cell1-conductor-conductor/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.329197 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_cc126966-2e68-43bc-a129-8f93723b78c1/nova-api-api/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.400278 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_7ad0a53e-9434-4af3-bfdc-b7a145293bec/nova-cell1-novncproxy-novncproxy/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.534702 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_24ac26ee-f25c-455c-ad9f-54a8991dd6f6/nova-metadata-log/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.901039 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_785bf387-d72e-49ad-8d43-9ef6c1475fb4/nova-scheduler-scheduler/0.log" Dec 09 16:39:57 crc kubenswrapper[4716]: I1209 16:39:57.942478 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c32f00d5-f870-4dc6-8387-81bfe37b06f8/mysql-bootstrap/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.121076 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c32f00d5-f870-4dc6-8387-81bfe37b06f8/galera/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.159631 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c32f00d5-f870-4dc6-8387-81bfe37b06f8/mysql-bootstrap/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: E1209 16:39:58.218844 4716 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.341223 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b0be2722-84d6-4885-80bc-a795d7f2c05e/mysql-bootstrap/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.565686 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b0be2722-84d6-4885-80bc-a795d7f2c05e/mysql-bootstrap/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.623464 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b0be2722-84d6-4885-80bc-a795d7f2c05e/galera/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.789989 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_90ad0d77-c429-467b-a32d-46be1ccd1c9b/openstackclient/0.log" Dec 09 16:39:58 crc kubenswrapper[4716]: I1209 16:39:58.867750 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-8mm7h_890cdf81-6c51-4954-a7fc-ea6116941cfe/ovn-controller/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.074969 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-g6rkt_1fa68337-088e-40a6-8cc4-3b2b0947f959/openstack-network-exporter/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.272705 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pf2c7_6f4610df-19db-432c-805c-2e8b52e5b344/ovsdb-server-init/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.512083 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pf2c7_6f4610df-19db-432c-805c-2e8b52e5b344/ovs-vswitchd/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.540301 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pf2c7_6f4610df-19db-432c-805c-2e8b52e5b344/ovsdb-server-init/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.564755 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pf2c7_6f4610df-19db-432c-805c-2e8b52e5b344/ovsdb-server/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.710739 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_24ac26ee-f25c-455c-ad9f-54a8991dd6f6/nova-metadata-metadata/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.781233 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7fa17075-24ea-4f90-9c45-8444a101b2ce/openstack-network-exporter/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.803523 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7fa17075-24ea-4f90-9c45-8444a101b2ce/ovn-northd/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.984440 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_fa255b9e-51c8-407a-aebe-606da43b9906/openstack-network-exporter/0.log" Dec 09 16:39:59 crc kubenswrapper[4716]: I1209 16:39:59.993611 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_fa255b9e-51c8-407a-aebe-606da43b9906/ovsdbserver-nb/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.238538 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_07a42bcd-0edf-4215-a2ce-e5b66b8c09d7/openstack-network-exporter/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.252210 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_07a42bcd-0edf-4215-a2ce-e5b66b8c09d7/ovsdbserver-sb/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.370046 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-864d657bcd-fb9jd_1bd96021-5850-4f28-b8e7-666617201e0e/placement-api/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.518432 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-864d657bcd-fb9jd_1bd96021-5850-4f28-b8e7-666617201e0e/placement-log/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.625980 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4076ed92-7690-4a97-b3d6-53a64842c96e/init-config-reloader/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.754187 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4076ed92-7690-4a97-b3d6-53a64842c96e/init-config-reloader/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.785170 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4076ed92-7690-4a97-b3d6-53a64842c96e/config-reloader/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.835416 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4076ed92-7690-4a97-b3d6-53a64842c96e/prometheus/0.log" Dec 09 16:40:00 crc kubenswrapper[4716]: I1209 16:40:00.861985 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4076ed92-7690-4a97-b3d6-53a64842c96e/thanos-sidecar/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.061165 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b3fcad49-046e-4075-8906-c4629fc77587/setup-container/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.203951 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b3fcad49-046e-4075-8906-c4629fc77587/setup-container/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.282938 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b3fcad49-046e-4075-8906-c4629fc77587/rabbitmq/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.370887 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcc8c279-3c20-4050-95d5-5b71af2134cf/setup-container/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.587817 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcc8c279-3c20-4050-95d5-5b71af2134cf/rabbitmq/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.591078 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_fcc8c279-3c20-4050-95d5-5b71af2134cf/setup-container/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.663026 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-rpdr9_ee7f866e-a97e-410e-abf9-b67082ac802f/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:40:01 crc kubenswrapper[4716]: I1209 16:40:01.842172 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-ldbjm_06d46d95-221a-40fb-a82e-0143ea9a6c91/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.098450 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7ff4fc547f-x4v24_d12a5bad-2a74-4efa-9db3-796dde581bc7/proxy-server/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.106553 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7ff4fc547f-x4v24_d12a5bad-2a74-4efa-9db3-796dde581bc7/proxy-httpd/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: E1209 16:40:02.216908 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.226744 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-hkncs_5b9ed832-324f-4995-8277-6e979cfc7bc1/swift-ring-rebalance/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.319861 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/account-auditor/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.393935 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/account-reaper/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.549503 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/account-replicator/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.580061 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/account-server/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.597828 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/container-auditor/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.715603 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/container-replicator/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.790295 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/container-server/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.792676 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/container-updater/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.857437 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/object-auditor/0.log" Dec 09 16:40:02 crc kubenswrapper[4716]: I1209 16:40:02.949304 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/object-expirer/0.log" Dec 09 16:40:03 crc kubenswrapper[4716]: I1209 16:40:03.072813 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/object-server/0.log" Dec 09 16:40:03 crc kubenswrapper[4716]: I1209 16:40:03.101697 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/object-replicator/0.log" Dec 09 16:40:03 crc kubenswrapper[4716]: I1209 16:40:03.114116 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/object-updater/0.log" Dec 09 16:40:03 crc kubenswrapper[4716]: I1209 16:40:03.186146 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/rsync/0.log" Dec 09 16:40:03 crc kubenswrapper[4716]: I1209 16:40:03.295097 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_552d079e-332b-46db-946b-2777875f3dc7/swift-recon-cron/0.log" Dec 09 16:40:05 crc kubenswrapper[4716]: I1209 16:40:05.214897 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:40:05 crc kubenswrapper[4716]: E1209 16:40:05.215452 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:40:09 crc kubenswrapper[4716]: I1209 16:40:09.911955 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_67d51dc9-9087-4d11-9f1f-0947af797f5d/memcached/0.log" Dec 09 16:40:11 crc kubenswrapper[4716]: E1209 16:40:11.218070 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:40:15 crc kubenswrapper[4716]: E1209 16:40:15.215852 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:40:16 crc kubenswrapper[4716]: I1209 16:40:16.213562 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:40:16 crc kubenswrapper[4716]: E1209 16:40:16.213927 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:40:25 crc kubenswrapper[4716]: 
E1209 16:40:25.215982 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:40:27 crc kubenswrapper[4716]: E1209 16:40:27.218281 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:40:29 crc kubenswrapper[4716]: I1209 16:40:29.213966 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:40:29 crc kubenswrapper[4716]: E1209 16:40:29.214515 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:40:33 crc kubenswrapper[4716]: I1209 16:40:33.818705 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/util/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.004464 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/util/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.005976 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/pull/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.027938 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/pull/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.233251 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/pull/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.233904 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/util/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.281242 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_09f547718dbde8eef0eb42ddec4b9f2289bf5ccec79a5d1ae23a41cf297f48v_4d0cce9d-1fc7-4f60-9651-e25306d13944/extract/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.457035 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-z2l9s_33b32911-25d0-45d5-8009-4d9787875e86/kube-rbac-proxy/0.log" Dec 09 16:40:34 crc 
kubenswrapper[4716]: I1209 16:40:34.475000 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-z2l9s_33b32911-25d0-45d5-8009-4d9787875e86/manager/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.760648 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-xzp8n_266cbf21-b738-4045-902b-88deadcc5869/kube-rbac-proxy/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.897998 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-xzp8n_266cbf21-b738-4045-902b-88deadcc5869/manager/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.946769 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-p6hrb_ee92f03c-d515-4aa3-ad3c-cce0c89fd12b/kube-rbac-proxy/0.log" Dec 09 16:40:34 crc kubenswrapper[4716]: I1209 16:40:34.977878 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-p6hrb_ee92f03c-d515-4aa3-ad3c-cce0c89fd12b/manager/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.102563 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-s7mv7_b60a4ca3-f2c6-4829-bc4f-d47b0740e378/kube-rbac-proxy/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.216665 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-s7mv7_b60a4ca3-f2c6-4829-bc4f-d47b0740e378/manager/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.321806 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-2ft2d_c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489/kube-rbac-proxy/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.413457 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-2ft2d_c2869bc2-fe1e-49a0-9bd5-ec2b1a23b489/manager/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.426191 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-9d9ms_87589b07-5c3c-46c9-b84e-ffc2efa3b817/kube-rbac-proxy/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.531953 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-9d9ms_87589b07-5c3c-46c9-b84e-ffc2efa3b817/manager/0.log" Dec 09 16:40:35 crc kubenswrapper[4716]: I1209 16:40:35.623224 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-sktgh_b5a74069-1167-4e2a-a3b2-c11507b121ab/kube-rbac-proxy/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.086114 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-hq8hn_695a0bab-c2c8-4c7d-9420-2dc191000e54/kube-rbac-proxy/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.129437 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-hq8hn_695a0bab-c2c8-4c7d-9420-2dc191000e54/manager/0.log" Dec 
09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.130652 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-sktgh_b5a74069-1167-4e2a-a3b2-c11507b121ab/manager/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.362006 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-szpds_436bb095-8486-4d67-9fc5-6596738065cc/kube-rbac-proxy/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.369237 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-szpds_436bb095-8486-4d67-9fc5-6596738065cc/manager/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.491912 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-dzpdl_30c82b79-c03d-45d3-8b0a-ca506daf2934/kube-rbac-proxy/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.552593 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-5pz92_8da5796c-fddb-414b-8d26-6657d25e6c00/kube-rbac-proxy/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.572042 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-dzpdl_30c82b79-c03d-45d3-8b0a-ca506daf2934/manager/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.717829 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-5pz92_8da5796c-fddb-414b-8d26-6657d25e6c00/manager/0.log" Dec 09 16:40:36 crc kubenswrapper[4716]: I1209 16:40:36.982446 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-qgr4j_86831782-f8f0-40ad-99f7-4568185065b0/kube-rbac-proxy/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.014345 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-qgr4j_86831782-f8f0-40ad-99f7-4568185065b0/manager/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.213397 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-pmct6_f1029307-bcf9-40c0-b656-b7d203493022/kube-rbac-proxy/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.229394 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-pmct6_f1029307-bcf9-40c0-b656-b7d203493022/manager/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.356349 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-kgr69_6890a044-24cb-49b2-9d60-105fa8e57573/kube-rbac-proxy/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.425735 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-kgr69_6890a044-24cb-49b2-9d60-105fa8e57573/manager/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.527148 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fdbbht_05653cf9-bc33-44bb-acdd-21dd610a7665/kube-rbac-proxy/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.569113 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fdbbht_05653cf9-bc33-44bb-acdd-21dd610a7665/manager/0.log" Dec 09 16:40:37 crc kubenswrapper[4716]: I1209 16:40:37.977739 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-wzhdn_685c966f-2aa9-42bc-8d60-d2ebc9b68b0a/registry-server/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.014037 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-77b4579dbb-785g6_7b3f5ffb-3826-458b-9777-efd0b8cc747e/operator/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.231525 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-649h2_d5727f03-aede-454c-8dec-17e10986da51/kube-rbac-proxy/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.314965 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-649h2_d5727f03-aede-454c-8dec-17e10986da51/manager/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.453224 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-fwnp7_43243e09-47d6-428c-adc6-3542056106b5/kube-rbac-proxy/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.607222 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-fwnp7_43243e09-47d6-428c-adc6-3542056106b5/manager/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.719633 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-tf7dx_08b1c536-2d1d-42fc-9e18-87ebe38bed16/operator/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.904678 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5955d8c9f-mpmbx_3221ee7d-c104-4055-961a-46cd6ba8c602/manager/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.914888 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-h4nlt_4d332287-0f6e-46d1-9cd9-d31dd855d753/manager/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.930951 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5656d9bf69-llf2c_57b68343-4540-4097-9f68-a538c63bae3b/kube-rbac-proxy/0.log" Dec 09 16:40:38 crc kubenswrapper[4716]: I1209 16:40:38.958804 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-h4nlt_4d332287-0f6e-46d1-9cd9-d31dd855d753/kube-rbac-proxy/0.log" Dec 09 16:40:39 crc kubenswrapper[4716]: I1209 16:40:39.100246 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-kspms_ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2/kube-rbac-proxy/0.log" Dec 09 16:40:39 crc kubenswrapper[4716]: E1209 16:40:39.216383 4716 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:40:39 crc kubenswrapper[4716]: I1209 16:40:39.220880 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-kspms_ae27699b-b83f-49e3-a0f0-a1fd2fc4c6f2/manager/0.log" Dec 09 16:40:39 crc kubenswrapper[4716]: I1209 16:40:39.308531 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-s7dkv_368b843d-fa53-406c-a5a8-a78a8763718d/kube-rbac-proxy/0.log" Dec 09 16:40:39 crc kubenswrapper[4716]: I1209 16:40:39.389103 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5656d9bf69-llf2c_57b68343-4540-4097-9f68-a538c63bae3b/manager/0.log" Dec 09 16:40:39 crc kubenswrapper[4716]: I1209 16:40:39.431232 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-s7dkv_368b843d-fa53-406c-a5a8-a78a8763718d/manager/0.log" Dec 09 16:40:41 crc kubenswrapper[4716]: E1209 16:40:41.216921 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:40:42 crc kubenswrapper[4716]: I1209 16:40:42.214408 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:40:42 crc kubenswrapper[4716]: E1209 16:40:42.214748 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:40:53 crc kubenswrapper[4716]: E1209 16:40:53.223707 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:40:53 crc kubenswrapper[4716]: E1209 16:40:53.224559 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:40:55 crc kubenswrapper[4716]: I1209 16:40:55.213805 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:40:55 crc kubenswrapper[4716]: E1209 16:40:55.214345 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:40:57 crc kubenswrapper[4716]: I1209 16:40:57.523652 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-hcmms_297ecdc1-e23a-469b-a1f5-907876ecdfaa/control-plane-machine-set-operator/0.log" Dec 09 16:40:57 crc kubenswrapper[4716]: I1209 16:40:57.671492 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-w6p7n_277458f8-729e-4250-b4d0-db21713e4e48/machine-api-operator/0.log" Dec 09 16:40:57 crc kubenswrapper[4716]: I1209 16:40:57.709939 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-w6p7n_277458f8-729e-4250-b4d0-db21713e4e48/kube-rbac-proxy/0.log" Dec 09 16:41:05 crc kubenswrapper[4716]: E1209 16:41:05.217117 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:41:07 crc kubenswrapper[4716]: E1209 16:41:07.222273 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:41:09 crc kubenswrapper[4716]: I1209 16:41:09.219919 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:41:09 crc kubenswrapper[4716]: E1209 16:41:09.220865 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:41:09 crc kubenswrapper[4716]: I1209 16:41:09.764670 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-fkctm_d474af73-4a93-4368-90e4-01f572590cab/cert-manager-controller/0.log" Dec 09 16:41:09 crc kubenswrapper[4716]: I1209 16:41:09.923371 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-4skkl_8f4f2faa-6c38-44b8-a6ab-057e6819b498/cert-manager-cainjector/0.log" Dec 09 16:41:10 crc kubenswrapper[4716]: I1209 16:41:10.005112 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-cfbrw_d1900052-b34f-4e6c-93fe-b58810e88ecf/cert-manager-webhook/0.log" Dec 09 16:41:19 crc kubenswrapper[4716]: E1209 16:41:19.216975 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:41:21 crc kubenswrapper[4716]: E1209 16:41:21.220993 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:41:21 crc kubenswrapper[4716]: I1209 16:41:21.916432 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-52knq_c3cafcff-7dbc-4599-9fb2-c2931006503d/nmstate-console-plugin/0.log" Dec 09 16:41:22 crc kubenswrapper[4716]: I1209 16:41:22.098364 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-8t6l4_6456c272-4118-4d6d-837a-37e423889025/nmstate-handler/0.log" Dec 09 16:41:22 crc kubenswrapper[4716]: I1209 16:41:22.130309 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-qgx2m_71d69185-6b38-4b98-9699-40641b4ce638/kube-rbac-proxy/0.log" Dec 09 16:41:22 crc kubenswrapper[4716]: I1209 16:41:22.206051 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-qgx2m_71d69185-6b38-4b98-9699-40641b4ce638/nmstate-metrics/0.log" Dec 09 16:41:22 crc kubenswrapper[4716]: I1209 16:41:22.400491 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-kldvv_07c2807c-d231-400f-aef5-4a71fc98749b/nmstate-operator/0.log" Dec 09 16:41:22 crc kubenswrapper[4716]: I1209 16:41:22.428641 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-w8bpd_7e2a8951-b8c0-43a4-ad05-ca0d0000d70b/nmstate-webhook/0.log" Dec 09 16:41:23 crc kubenswrapper[4716]: I1209 16:41:23.221396 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:41:23 crc kubenswrapper[4716]: E1209 16:41:23.221864 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:41:31 crc kubenswrapper[4716]: E1209 16:41:31.216248 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:41:33 crc kubenswrapper[4716]: E1209 16:41:33.225905 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:41:37 crc kubenswrapper[4716]: I1209 
16:41:37.482840 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-9bd696f86-gbtzx_b564ab42-fe8b-4b45-a502-0d10a2cedd07/kube-rbac-proxy/0.log" Dec 09 16:41:37 crc kubenswrapper[4716]: I1209 16:41:37.499183 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-9bd696f86-gbtzx_b564ab42-fe8b-4b45-a502-0d10a2cedd07/manager/0.log" Dec 09 16:41:38 crc kubenswrapper[4716]: I1209 16:41:38.214132 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:41:38 crc kubenswrapper[4716]: E1209 16:41:38.214502 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" Dec 09 16:41:42 crc kubenswrapper[4716]: E1209 16:41:42.218413 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.274326 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7ljlb"] Dec 09 16:41:43 crc kubenswrapper[4716]: E1209 16:41:43.275316 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="extract-utilities" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.275342 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="extract-utilities" Dec 09 16:41:43 crc kubenswrapper[4716]: E1209 16:41:43.275364 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="registry-server" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.275372 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="registry-server" Dec 09 16:41:43 crc kubenswrapper[4716]: E1209 16:41:43.275390 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="extract-content" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.275398 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="extract-content" Dec 09 16:41:43 crc kubenswrapper[4716]: E1209 16:41:43.275420 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="104cf567-6f5a-40f3-b157-8f43300a8492" containerName="container-00" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.275428 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="104cf567-6f5a-40f3-b157-8f43300a8492" containerName="container-00" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.275754 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="104cf567-6f5a-40f3-b157-8f43300a8492" containerName="container-00" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.275781 
4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c28c8a3-4208-4879-aba0-2c929c836566" containerName="registry-server" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.278236 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.298395 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ljlb"] Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.392941 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqx5h\" (UniqueName: \"kubernetes.io/projected/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-kube-api-access-qqx5h\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.393391 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-utilities\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.393807 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-catalog-content\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.496609 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-utilities\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.496703 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-catalog-content\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.497188 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqx5h\" (UniqueName: \"kubernetes.io/projected/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-kube-api-access-qqx5h\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.497309 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-catalog-content\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.497687 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-utilities\") pod 
\"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.526357 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqx5h\" (UniqueName: \"kubernetes.io/projected/ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14-kube-api-access-qqx5h\") pod \"redhat-operators-7ljlb\" (UID: \"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14\") " pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:43 crc kubenswrapper[4716]: I1209 16:41:43.600174 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:41:44 crc kubenswrapper[4716]: I1209 16:41:44.161471 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ljlb"] Dec 09 16:41:44 crc kubenswrapper[4716]: I1209 16:41:44.311525 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ljlb" event={"ID":"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14","Type":"ContainerStarted","Data":"3da5cd960a3fb7e1f10dcc6830a77937f72886d1fdb104fe8d7d2eb65a670416"} Dec 09 16:41:45 crc kubenswrapper[4716]: I1209 16:41:45.324400 4716 generic.go:334] "Generic (PLEG): container finished" podID="ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14" containerID="b26079b137edcb38c3cabef3c39c1fb4098a502c78cd971063f43b8d4c1c8a91" exitCode=0 Dec 09 16:41:45 crc kubenswrapper[4716]: I1209 16:41:45.324675 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ljlb" event={"ID":"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14","Type":"ContainerDied","Data":"b26079b137edcb38c3cabef3c39c1fb4098a502c78cd971063f43b8d4c1c8a91"} Dec 09 16:41:48 crc kubenswrapper[4716]: E1209 16:41:48.216938 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:41:51 crc kubenswrapper[4716]: I1209 16:41:51.214055 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:41:53 crc kubenswrapper[4716]: I1209 16:41:53.606910 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-vvvc4_90319519-b1f6-4387-aa09-eacd3c5b33ff/cluster-logging-operator/0.log" Dec 09 16:41:53 crc kubenswrapper[4716]: I1209 16:41:53.820387 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-88hn7_367f5f03-2020-4489-95e5-ec09b8e37128/collector/0.log" Dec 09 16:41:53 crc kubenswrapper[4716]: I1209 16:41:53.872681 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_610ad8de-905a-4420-adc6-a0b438e2b346/loki-compactor/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.078911 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-shzjb_96be7883-9eb8-4f5c-9376-e4237b3663f8/loki-distributor/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.132989 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-58996586f7-4xczf_3047bff0-e43d-44a3-9a8e-7491cbc979af/gateway/0.log" Dec 09 
16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.576751 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-58996586f7-4xczf_3047bff0-e43d-44a3-9a8e-7491cbc979af/opa/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.576932 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-58996586f7-7vqxj_f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef/opa/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.577003 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-58996586f7-7vqxj_f2c74b99-fcc0-44db-9ffa-f7e8ee0c66ef/gateway/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.627245 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_841d38f9-6814-4dc5-bb78-c154f42b3257/loki-index-gateway/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.800436 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_00f16fe7-88b3-4d0a-ba38-f68a4d340686/loki-ingester/0.log" Dec 09 16:41:54 crc kubenswrapper[4716]: I1209 16:41:54.873019 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-bqnhh_4013658d-da41-4b67-84da-b0ca67216d45/loki-querier/0.log" Dec 09 16:41:55 crc kubenswrapper[4716]: I1209 16:41:55.014921 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-zcnfl_1c35fb07-6fe6-490e-b627-165a5500e574/loki-query-frontend/0.log" Dec 09 16:41:55 crc kubenswrapper[4716]: E1209 16:41:55.219255 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:41:57 crc kubenswrapper[4716]: I1209 16:41:57.482863 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"25624bd532411991661615df205e51c08162f3ddb1213bad665cdcf2e9f1cb15"} Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.508848 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wlth4"] Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.524980 4716 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.557601 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wlth4"] Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.561946 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ljlb" event={"ID":"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14","Type":"ContainerStarted","Data":"fe69befd44f7ba9ec384b175f7ff10eefea967baa5573761fc6b7178ff9de3a0"} Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.709953 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-catalog-content\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.710299 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qp2b\" (UniqueName: \"kubernetes.io/projected/604e3cba-62d9-49da-9651-14ed6837d49c-kube-api-access-8qp2b\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.710519 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-utilities\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.813474 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-utilities\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.813665 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-catalog-content\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.813732 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qp2b\" (UniqueName: \"kubernetes.io/projected/604e3cba-62d9-49da-9651-14ed6837d49c-kube-api-access-8qp2b\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.814249 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-catalog-content\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.814696 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-utilities\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.840664 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qp2b\" (UniqueName: \"kubernetes.io/projected/604e3cba-62d9-49da-9651-14ed6837d49c-kube-api-access-8qp2b\") pod \"certified-operators-wlth4\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") " pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:58 crc kubenswrapper[4716]: I1209 16:41:58.871054 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:41:59 crc kubenswrapper[4716]: I1209 16:41:59.473140 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wlth4"] Dec 09 16:41:59 crc kubenswrapper[4716]: I1209 16:41:59.586348 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerStarted","Data":"4424560c6d3bf0a79f430b000bae89c5a7d90b0c329ff3110d5d59ffd91ba2f8"} Dec 09 16:41:59 crc kubenswrapper[4716]: I1209 16:41:59.588500 4716 generic.go:334] "Generic (PLEG): container finished" podID="ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14" containerID="fe69befd44f7ba9ec384b175f7ff10eefea967baa5573761fc6b7178ff9de3a0" exitCode=0 Dec 09 16:41:59 crc kubenswrapper[4716]: I1209 16:41:59.588548 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ljlb" event={"ID":"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14","Type":"ContainerDied","Data":"fe69befd44f7ba9ec384b175f7ff10eefea967baa5573761fc6b7178ff9de3a0"} Dec 09 16:42:00 crc kubenswrapper[4716]: I1209 16:42:00.608150 4716 generic.go:334] "Generic (PLEG): container finished" podID="604e3cba-62d9-49da-9651-14ed6837d49c" containerID="a13ea6ee2b5bfc11e6cd19a6a1c279eea16d2ad4a2cce875e51701957d081443" exitCode=0 Dec 09 16:42:00 crc kubenswrapper[4716]: I1209 16:42:00.608306 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerDied","Data":"a13ea6ee2b5bfc11e6cd19a6a1c279eea16d2ad4a2cce875e51701957d081443"} Dec 09 16:42:01 crc kubenswrapper[4716]: E1209 16:42:01.217143 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:42:01 crc kubenswrapper[4716]: I1209 16:42:01.625534 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ljlb" event={"ID":"ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14","Type":"ContainerStarted","Data":"3822c469f7bc36781e995a3b237c465fb6ce34d921329386303c49e05a2a7313"} Dec 09 16:42:01 crc kubenswrapper[4716]: I1209 16:42:01.662660 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7ljlb" podStartSLOduration=3.781252589 podStartE2EDuration="18.662618997s" podCreationTimestamp="2025-12-09 16:41:43 +0000 UTC" firstStartedPulling="2025-12-09 
16:41:45.326800744 +0000 UTC m=+5592.481544732" lastFinishedPulling="2025-12-09 16:42:00.208167152 +0000 UTC m=+5607.362911140" observedRunningTime="2025-12-09 16:42:01.64968667 +0000 UTC m=+5608.804430658" watchObservedRunningTime="2025-12-09 16:42:01.662618997 +0000 UTC m=+5608.817362985" Dec 09 16:42:02 crc kubenswrapper[4716]: I1209 16:42:02.643963 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerStarted","Data":"8c60b6ef5f7c582dc57738574784ebd9d7d61bbc18c9b1ecbaef0dcedc6679db"} Dec 09 16:42:03 crc kubenswrapper[4716]: I1209 16:42:03.601806 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:42:03 crc kubenswrapper[4716]: I1209 16:42:03.602202 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:42:04 crc kubenswrapper[4716]: I1209 16:42:04.659816 4716 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7ljlb" podUID="ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14" containerName="registry-server" probeResult="failure" output=< Dec 09 16:42:04 crc kubenswrapper[4716]: timeout: failed to connect service ":50051" within 1s Dec 09 16:42:04 crc kubenswrapper[4716]: > Dec 09 16:42:05 crc kubenswrapper[4716]: I1209 16:42:05.684547 4716 generic.go:334] "Generic (PLEG): container finished" podID="604e3cba-62d9-49da-9651-14ed6837d49c" containerID="8c60b6ef5f7c582dc57738574784ebd9d7d61bbc18c9b1ecbaef0dcedc6679db" exitCode=0 Dec 09 16:42:05 crc kubenswrapper[4716]: I1209 16:42:05.684640 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerDied","Data":"8c60b6ef5f7c582dc57738574784ebd9d7d61bbc18c9b1ecbaef0dcedc6679db"} Dec 09 16:42:06 crc kubenswrapper[4716]: I1209 16:42:06.700944 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerStarted","Data":"5634f146412a63dfd67f8359d5cfcfdb5398d5955360a705195d26a03161f331"} Dec 09 16:42:06 crc kubenswrapper[4716]: I1209 16:42:06.729146 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wlth4" podStartSLOduration=2.955960784 podStartE2EDuration="8.729120874s" podCreationTimestamp="2025-12-09 16:41:58 +0000 UTC" firstStartedPulling="2025-12-09 16:42:00.610451589 +0000 UTC m=+5607.765195577" lastFinishedPulling="2025-12-09 16:42:06.383611679 +0000 UTC m=+5613.538355667" observedRunningTime="2025-12-09 16:42:06.718167403 +0000 UTC m=+5613.872911391" watchObservedRunningTime="2025-12-09 16:42:06.729120874 +0000 UTC m=+5613.883864862" Dec 09 16:42:08 crc kubenswrapper[4716]: I1209 16:42:08.873323 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:42:08 crc kubenswrapper[4716]: I1209 16:42:08.874360 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:42:08 crc kubenswrapper[4716]: I1209 16:42:08.931494 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:42:10 crc kubenswrapper[4716]: 
E1209 16:42:10.215938 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.289149 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-8gjtr_c34d08f8-624c-47b0-9373-f96e9f280c74/kube-rbac-proxy/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.390222 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-8gjtr_c34d08f8-624c-47b0-9373-f96e9f280c74/controller/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.478645 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-t252v_8d5472cc-28f8-4887-97d5-b428c89a1730/frr-k8s-webhook-server/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.576615 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-frr-files/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.812486 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-metrics/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.825153 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-frr-files/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.837135 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-reloader/0.log" Dec 09 16:42:12 crc kubenswrapper[4716]: I1209 16:42:12.862201 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-reloader/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.072718 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-reloader/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.073474 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-frr-files/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.097478 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-metrics/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.113751 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-metrics/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: E1209 16:42:13.224748 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.322639 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-reloader/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.323878 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-frr-files/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.333055 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/cp-metrics/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.338883 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/controller/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.556691 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/frr-metrics/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.578868 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/kube-rbac-proxy/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.586516 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/kube-rbac-proxy-frr/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.682371 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.750459 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7ljlb" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.883196 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5655658545-cjrmh_e79da35c-ab1c-40ac-89b1-4406af3ff5f5/manager/0.log" Dec 09 16:42:13 crc kubenswrapper[4716]: I1209 16:42:13.896092 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/reloader/0.log" Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.084662 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-65998d95b4-8s5d4_2a6158db-9b91-48c4-a8fb-610541de1ebe/webhook-server/0.log" Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.299258 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ljlb"] Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.378224 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-z4t47_59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d/kube-rbac-proxy/0.log" Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.484585 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"] Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.485172 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r7ztf" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="registry-server" containerID="cri-o://b6fc51f749dd339ef4ab830d4cc605e4b6faade1e3e5d49575eeb44b2d54b380" gracePeriod=2 Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.797172 4716 generic.go:334] "Generic (PLEG): container finished" 
podID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerID="b6fc51f749dd339ef4ab830d4cc605e4b6faade1e3e5d49575eeb44b2d54b380" exitCode=0 Dec 09 16:42:14 crc kubenswrapper[4716]: I1209 16:42:14.798491 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerDied","Data":"b6fc51f749dd339ef4ab830d4cc605e4b6faade1e3e5d49575eeb44b2d54b380"} Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.112387 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-z4t47_59f248b3-5ec0-4aef-bee1-1e0bf7ebaa9d/speaker/0.log" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.156955 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r7ztf" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.310390 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-utilities\") pod \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.310749 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-catalog-content\") pod \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.310781 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxr5v\" (UniqueName: \"kubernetes.io/projected/252727f3-87c7-420e-9bc2-5a7399f2e0e7-kube-api-access-vxr5v\") pod \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\" (UID: \"252727f3-87c7-420e-9bc2-5a7399f2e0e7\") " Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.313065 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-utilities" (OuterVolumeSpecName: "utilities") pod "252727f3-87c7-420e-9bc2-5a7399f2e0e7" (UID: "252727f3-87c7-420e-9bc2-5a7399f2e0e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.341917 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/252727f3-87c7-420e-9bc2-5a7399f2e0e7-kube-api-access-vxr5v" (OuterVolumeSpecName: "kube-api-access-vxr5v") pod "252727f3-87c7-420e-9bc2-5a7399f2e0e7" (UID: "252727f3-87c7-420e-9bc2-5a7399f2e0e7"). InnerVolumeSpecName "kube-api-access-vxr5v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.414366 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxr5v\" (UniqueName: \"kubernetes.io/projected/252727f3-87c7-420e-9bc2-5a7399f2e0e7-kube-api-access-vxr5v\") on node \"crc\" DevicePath \"\"" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.414413 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.488739 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "252727f3-87c7-420e-9bc2-5a7399f2e0e7" (UID: "252727f3-87c7-420e-9bc2-5a7399f2e0e7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.519961 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/252727f3-87c7-420e-9bc2-5a7399f2e0e7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.702290 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ztmht_e2caed4f-2e2a-4658-8705-49c1b6d66492/frr/0.log" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.812554 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r7ztf" event={"ID":"252727f3-87c7-420e-9bc2-5a7399f2e0e7","Type":"ContainerDied","Data":"c7272b4ee7d5df1c0718e9f244173de725dc66cfa37acbdca98b662492fa0c0c"} Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.812630 4716 scope.go:117] "RemoveContainer" containerID="b6fc51f749dd339ef4ab830d4cc605e4b6faade1e3e5d49575eeb44b2d54b380" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.812685 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r7ztf" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.842865 4716 scope.go:117] "RemoveContainer" containerID="306c6891d2d6f0c56b26642b63a86fc50e4c0a1ef4140d21e9e594c06af5524e" Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.864758 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"] Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.876132 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r7ztf"] Dec 09 16:42:15 crc kubenswrapper[4716]: I1209 16:42:15.878269 4716 scope.go:117] "RemoveContainer" containerID="481940dfef0f52c0f906f3c9b73894202bb186abcebfb5834fa7abee4a1ad9b2" Dec 09 16:42:17 crc kubenswrapper[4716]: I1209 16:42:17.229845 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" path="/var/lib/kubelet/pods/252727f3-87c7-420e-9bc2-5a7399f2e0e7/volumes" Dec 09 16:42:18 crc kubenswrapper[4716]: I1209 16:42:18.927128 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wlth4" Dec 09 16:42:19 crc kubenswrapper[4716]: I1209 16:42:19.678657 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wlth4"] Dec 09 16:42:19 crc kubenswrapper[4716]: I1209 16:42:19.851763 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wlth4" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="registry-server" containerID="cri-o://5634f146412a63dfd67f8359d5cfcfdb5398d5955360a705195d26a03161f331" gracePeriod=2 Dec 09 16:42:20 crc kubenswrapper[4716]: I1209 16:42:20.870896 4716 generic.go:334] "Generic (PLEG): container finished" podID="604e3cba-62d9-49da-9651-14ed6837d49c" containerID="5634f146412a63dfd67f8359d5cfcfdb5398d5955360a705195d26a03161f331" exitCode=0 Dec 09 16:42:20 crc kubenswrapper[4716]: I1209 16:42:20.871431 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerDied","Data":"5634f146412a63dfd67f8359d5cfcfdb5398d5955360a705195d26a03161f331"} Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.148975 4716 util.go:48] "No ready sandbox for pod can be found. 
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.279895 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qp2b\" (UniqueName: \"kubernetes.io/projected/604e3cba-62d9-49da-9651-14ed6837d49c-kube-api-access-8qp2b\") pod \"604e3cba-62d9-49da-9651-14ed6837d49c\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") "
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.279961 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-catalog-content\") pod \"604e3cba-62d9-49da-9651-14ed6837d49c\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") "
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.280001 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-utilities\") pod \"604e3cba-62d9-49da-9651-14ed6837d49c\" (UID: \"604e3cba-62d9-49da-9651-14ed6837d49c\") "
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.281220 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-utilities" (OuterVolumeSpecName: "utilities") pod "604e3cba-62d9-49da-9651-14ed6837d49c" (UID: "604e3cba-62d9-49da-9651-14ed6837d49c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.283525 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.286298 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/604e3cba-62d9-49da-9651-14ed6837d49c-kube-api-access-8qp2b" (OuterVolumeSpecName: "kube-api-access-8qp2b") pod "604e3cba-62d9-49da-9651-14ed6837d49c" (UID: "604e3cba-62d9-49da-9651-14ed6837d49c"). InnerVolumeSpecName "kube-api-access-8qp2b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.333222 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "604e3cba-62d9-49da-9651-14ed6837d49c" (UID: "604e3cba-62d9-49da-9651-14ed6837d49c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.387890 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qp2b\" (UniqueName: \"kubernetes.io/projected/604e3cba-62d9-49da-9651-14ed6837d49c-kube-api-access-8qp2b\") on node \"crc\" DevicePath \"\""
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.388864 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/604e3cba-62d9-49da-9651-14ed6837d49c-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.884440 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlth4" event={"ID":"604e3cba-62d9-49da-9651-14ed6837d49c","Type":"ContainerDied","Data":"4424560c6d3bf0a79f430b000bae89c5a7d90b0c329ff3110d5d59ffd91ba2f8"}
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.884504 4716 scope.go:117] "RemoveContainer" containerID="5634f146412a63dfd67f8359d5cfcfdb5398d5955360a705195d26a03161f331"
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.884533 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wlth4"
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.906854 4716 scope.go:117] "RemoveContainer" containerID="8c60b6ef5f7c582dc57738574784ebd9d7d61bbc18c9b1ecbaef0dcedc6679db"
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.925713 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wlth4"]
Dec 09 16:42:21 crc kubenswrapper[4716]: I1209 16:42:21.937417 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wlth4"]
Dec 09 16:42:22 crc kubenswrapper[4716]: I1209 16:42:22.115992 4716 scope.go:117] "RemoveContainer" containerID="a13ea6ee2b5bfc11e6cd19a6a1c279eea16d2ad4a2cce875e51701957d081443"
Dec 09 16:42:23 crc kubenswrapper[4716]: I1209 16:42:23.228735 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" path="/var/lib/kubelet/pods/604e3cba-62d9-49da-9651-14ed6837d49c/volumes"
Dec 09 16:42:25 crc kubenswrapper[4716]: E1209 16:42:25.219295 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:42:25 crc kubenswrapper[4716]: E1209 16:42:25.219409 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.280352 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/util/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.522763 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/pull/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.543859 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/util/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.571636 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/pull/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.764153 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/util/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.776481 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/extract/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.780645 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8zv4vn_cc820795-3bec-42ce-b838-37592232bfb1/pull/0.log"
Dec 09 16:42:29 crc kubenswrapper[4716]: I1209 16:42:29.932433 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/util/0.log"
Dec 09 16:42:30 crc kubenswrapper[4716]: I1209 16:42:30.149601 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/pull/0.log"
Dec 09 16:42:30 crc kubenswrapper[4716]: I1209 16:42:30.169175 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/pull/0.log"
Dec 09 16:42:30 crc kubenswrapper[4716]: I1209 16:42:30.200237 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/util/0.log"
Dec 09 16:42:30 crc kubenswrapper[4716]: I1209 16:42:30.495949 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/util/0.log"
Dec 09 16:42:30 crc kubenswrapper[4716]: I1209 16:42:30.534735 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/pull/0.log"
Dec 09 16:42:31 crc kubenswrapper[4716]: I1209 16:42:31.333165 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2p7dq_63c5a169-7a26-45fb-90ad-a3d2d43516fd/extract/0.log"
Dec 09 16:42:31 crc kubenswrapper[4716]: I1209 16:42:31.375607 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/util/0.log"
Dec 09 16:42:31 crc kubenswrapper[4716]: I1209 16:42:31.810297 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/pull/0.log"
Dec 09 16:42:31 crc kubenswrapper[4716]: I1209 16:42:31.871102 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/util/0.log"
Dec 09 16:42:31 crc kubenswrapper[4716]: I1209 16:42:31.871897 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/pull/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.049898 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/util/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.117540 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/pull/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.120609 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210rqn55_d15321ce-4222-4dea-9cd4-addd3749023e/extract/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.281757 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/util/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.534554 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/util/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.536565 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/pull/0.log"
Dec 09 16:42:32 crc kubenswrapper[4716]: I1209 16:42:32.546674 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/pull/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.172755 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/pull/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.223816 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/util/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.225128 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463f7l29s_a0ebcd27-55c5-41b1-9663-2b880809ff5d/extract/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.368024 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/util/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.577176 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/pull/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.577319 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/pull/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.581280 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/util/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.840951 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/pull/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.917112 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/extract/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.922815 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/extract-utilities/0.log"
Dec 09 16:42:33 crc kubenswrapper[4716]: I1209 16:42:33.928162 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f839rhj4_f6b71818-3fd7-43c7-9603-cdec6e1fe6b4/util/0.log"
Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.093948 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/extract-utilities/0.log"
Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.114839 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/extract-content/0.log"
Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.117755 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/extract-content/0.log"
Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.335829 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/extract-utilities/0.log"
Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.339236 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/extract-content/0.log"
Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.348775 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-utilities/0.log"
path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-utilities/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.678271 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-utilities/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.689341 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-8hj6t_83189221-c6eb-471f-b842-49eeafd93e8e/registry-server/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.699317 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-content/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.706110 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-content/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.906537 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-utilities/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.918811 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/extract-content/0.log" Dec 09 16:42:34 crc kubenswrapper[4716]: I1209 16:42:34.996085 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-g5h2p_9197123c-d444-4d8e-94cd-8a9f61317430/marketplace-operator/0.log" Dec 09 16:42:35 crc kubenswrapper[4716]: I1209 16:42:35.316659 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/extract-utilities/0.log" Dec 09 16:42:35 crc kubenswrapper[4716]: I1209 16:42:35.486678 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/extract-utilities/0.log" Dec 09 16:42:35 crc kubenswrapper[4716]: I1209 16:42:35.515913 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/extract-content/0.log" Dec 09 16:42:35 crc kubenswrapper[4716]: I1209 16:42:35.542746 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/extract-content/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.028351 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2n29z_5044a327-be08-455c-b84a-8d2aec4c3bb0/registry-server/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.077501 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/extract-utilities/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.091402 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/extract-content/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: E1209 16:42:36.220387 4716 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.237571 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ktf2s_47c82d22-477b-4bd0-afd3-9ae2fed959f0/registry-server/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.279605 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/extract-utilities/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.476572 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/extract-utilities/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.493405 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/extract-content/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.505871 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/extract-content/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.708921 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/extract-content/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.719305 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/extract-utilities/0.log" Dec 09 16:42:36 crc kubenswrapper[4716]: I1209 16:42:36.785873 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ljlb_ce1d3ff7-c6b0-4ad1-95e7-5e629f44cc14/registry-server/0.log" Dec 09 16:42:37 crc kubenswrapper[4716]: E1209 16:42:37.215702 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:42:48 crc kubenswrapper[4716]: E1209 16:42:48.222909 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:42:49 crc kubenswrapper[4716]: E1209 16:42:49.216003 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:42:50 crc kubenswrapper[4716]: I1209 16:42:50.645963 4716 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-jft8l_2631330f-8710-4364-a3ea-6b0455e189f5/prometheus-operator/0.log" Dec 09 16:42:50 crc kubenswrapper[4716]: I1209 16:42:50.767009 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58d9d555d6-76kzp_ecae64e4-38fd-4c35-918b-fab84d70ad07/prometheus-operator-admission-webhook/0.log" Dec 09 16:42:50 crc kubenswrapper[4716]: I1209 16:42:50.890238 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58d9d555d6-94jvz_a85a65c4-faa1-47a5-849c-0715edb9e29d/prometheus-operator-admission-webhook/0.log" Dec 09 16:42:51 crc kubenswrapper[4716]: I1209 16:42:51.030596 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-hlfs7_ba73b247-86e8-4b3c-977f-a4bffb4f44e5/operator/0.log" Dec 09 16:42:51 crc kubenswrapper[4716]: I1209 16:42:51.119554 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-226pf_0ea0c2c0-b980-461f-9f94-38eb8630a830/observability-ui-dashboards/0.log" Dec 09 16:42:51 crc kubenswrapper[4716]: I1209 16:42:51.257367 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-jxhr5_51530d86-c01a-4a79-909e-f653c5582af7/perses-operator/0.log" Dec 09 16:43:03 crc kubenswrapper[4716]: E1209 16:43:03.230806 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:43:04 crc kubenswrapper[4716]: E1209 16:43:04.215323 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:43:06 crc kubenswrapper[4716]: I1209 16:43:06.878474 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-9bd696f86-gbtzx_b564ab42-fe8b-4b45-a502-0d10a2cedd07/kube-rbac-proxy/0.log" Dec 09 16:43:06 crc kubenswrapper[4716]: I1209 16:43:06.894903 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-9bd696f86-gbtzx_b564ab42-fe8b-4b45-a502-0d10a2cedd07/manager/0.log" Dec 09 16:43:15 crc kubenswrapper[4716]: E1209 16:43:15.216362 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:43:18 crc kubenswrapper[4716]: E1209 16:43:18.217713 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" 
podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:43:28 crc kubenswrapper[4716]: E1209 16:43:28.215421 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:43:29 crc kubenswrapper[4716]: E1209 16:43:29.223373 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:43:41 crc kubenswrapper[4716]: E1209 16:43:41.223070 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:43:43 crc kubenswrapper[4716]: E1209 16:43:43.225319 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:43:54 crc kubenswrapper[4716]: E1209 16:43:54.216604 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:43:56 crc kubenswrapper[4716]: E1209 16:43:56.216217 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:44:08 crc kubenswrapper[4716]: E1209 16:44:08.215251 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:44:09 crc kubenswrapper[4716]: E1209 16:44:09.223711 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:44:17 crc kubenswrapper[4716]: I1209 16:44:17.922585 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:44:17 crc kubenswrapper[4716]: I1209 16:44:17.923182 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:44:20 crc kubenswrapper[4716]: E1209 16:44:20.216057 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:44:22 crc kubenswrapper[4716]: I1209 16:44:22.217223 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 09 16:44:22 crc kubenswrapper[4716]: E1209 16:44:22.339817 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 09 16:44:22 crc kubenswrapper[4716]: E1209 16:44:22.339886 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
Dec 09 16:44:22 crc kubenswrapper[4716]: E1209 16:44:22.340036 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:44:22 crc kubenswrapper[4716]: E1209 16:44:22.343249 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:44:31 crc kubenswrapper[4716]: E1209 16:44:31.217340 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:44:37 crc kubenswrapper[4716]: E1209 16:44:37.220579 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:44:46 crc kubenswrapper[4716]: E1209 16:44:46.350313 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:44:46 crc kubenswrapper[4716]: E1209 16:44:46.351014 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:44:46 crc kubenswrapper[4716]: E1209 16:44:46.351161 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:44:46 crc kubenswrapper[4716]: E1209 16:44:46.352331 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:44:47 crc kubenswrapper[4716]: I1209 16:44:47.921926 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 09 16:44:47 crc kubenswrapper[4716]: I1209 16:44:47.922261 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 09 16:44:49 crc kubenswrapper[4716]: I1209 16:44:49.723312 4716 generic.go:334] "Generic (PLEG): container finished" podID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerID="87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68" exitCode=0
Dec 09 16:44:49 crc kubenswrapper[4716]: I1209 16:44:49.723402 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6kmv6/must-gather-8b94b" event={"ID":"32e41e7d-e621-42a2-a0e3-1e7d816dbe22","Type":"ContainerDied","Data":"87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"}
Dec 09 16:44:49 crc kubenswrapper[4716]: I1209 16:44:49.724557 4716 scope.go:117] "RemoveContainer" containerID="87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"
Dec 09 16:44:50 crc kubenswrapper[4716]: I1209 16:44:50.206445 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6kmv6_must-gather-8b94b_32e41e7d-e621-42a2-a0e3-1e7d816dbe22/gather/0.log"
Dec 09 16:44:52 crc kubenswrapper[4716]: E1209 16:44:52.217231 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.040869 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6kmv6/must-gather-8b94b"]
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.041715 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6kmv6/must-gather-8b94b" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="copy" containerID="cri-o://004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9" gracePeriod=2
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.052774 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6kmv6/must-gather-8b94b"]
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.713983 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6kmv6_must-gather-8b94b_32e41e7d-e621-42a2-a0e3-1e7d816dbe22/copy/0.log"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.715057 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.790492 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bxtk\" (UniqueName: \"kubernetes.io/projected/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-kube-api-access-4bxtk\") pod \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") "
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.790689 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-must-gather-output\") pod \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\" (UID: \"32e41e7d-e621-42a2-a0e3-1e7d816dbe22\") "
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.797258 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-kube-api-access-4bxtk" (OuterVolumeSpecName: "kube-api-access-4bxtk") pod "32e41e7d-e621-42a2-a0e3-1e7d816dbe22" (UID: "32e41e7d-e621-42a2-a0e3-1e7d816dbe22"). InnerVolumeSpecName "kube-api-access-4bxtk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.825862 4716 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6kmv6_must-gather-8b94b_32e41e7d-e621-42a2-a0e3-1e7d816dbe22/copy/0.log"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.826241 4716 generic.go:334] "Generic (PLEG): container finished" podID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerID="004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9" exitCode=143
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.826298 4716 scope.go:117] "RemoveContainer" containerID="004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.826464 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6kmv6/must-gather-8b94b"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.894548 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bxtk\" (UniqueName: \"kubernetes.io/projected/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-kube-api-access-4bxtk\") on node \"crc\" DevicePath \"\""
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.897301 4716 scope.go:117] "RemoveContainer" containerID="87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.954127 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "32e41e7d-e621-42a2-a0e3-1e7d816dbe22" (UID: "32e41e7d-e621-42a2-a0e3-1e7d816dbe22"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.955140 4716 scope.go:117] "RemoveContainer" containerID="004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9"
Dec 09 16:44:59 crc kubenswrapper[4716]: E1209 16:44:59.958586 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9\": container with ID starting with 004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9 not found: ID does not exist" containerID="004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.958649 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9"} err="failed to get container status \"004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9\": rpc error: code = NotFound desc = could not find container \"004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9\": container with ID starting with 004eddbbaa5b1596e0ffd6a9223e85d4e03a6cd1cba8e272d5aa8dbb30711ef9 not found: ID does not exist"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.958678 4716 scope.go:117] "RemoveContainer" containerID="87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"
Dec 09 16:44:59 crc kubenswrapper[4716]: E1209 16:44:59.960890 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68\": container with ID starting with 87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68 not found: ID does not exist" containerID="87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.960917 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68"} err="failed to get container status \"87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68\": rpc error: code = NotFound desc = could not find container \"87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68\": container with ID starting with 87640b9324691c8ac6ebba02b51cc1edcad96b2e22cc8a317e9289e05431be68 not found: ID does not exist"
Dec 09 16:44:59 crc kubenswrapper[4716]: I1209 16:44:59.997302 4716 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/32e41e7d-e621-42a2-a0e3-1e7d816dbe22-must-gather-output\") on node \"crc\" DevicePath \"\""
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.176125 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"]
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182351 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="extract-content"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182385 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="extract-content"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182406 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="extract-utilities"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182413 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="extract-utilities"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182420 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="extract-utilities"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182428 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="extract-utilities"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182445 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="copy"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182451 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="copy"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182462 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="registry-server"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182468 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="registry-server"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182483 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="registry-server"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182489 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="registry-server"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182507 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="gather"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182514 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="gather"
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.182523 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="extract-content"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182529 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="extract-content"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182781 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="copy"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182798 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" containerName="gather"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182819 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="252727f3-87c7-420e-9bc2-5a7399f2e0e7" containerName="registry-server"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.182833 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="604e3cba-62d9-49da-9651-14ed6837d49c" containerName="registry-server"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.183861 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.186384 4716 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.189367 4716 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.209453 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"]
Dec 09 16:45:00 crc kubenswrapper[4716]: E1209 16:45:00.216202 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.308210 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckq6b\" (UniqueName: \"kubernetes.io/projected/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-kube-api-access-ckq6b\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.309395 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-config-volume\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.309457 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-secret-volume\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.415926 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-config-volume\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.415995 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-secret-volume\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"
Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.416535 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckq6b\" (UniqueName: \"kubernetes.io/projected/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-kube-api-access-ckq6b\") pod
\"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.417223 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-config-volume\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.423752 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-secret-volume\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.440641 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckq6b\" (UniqueName: \"kubernetes.io/projected/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-kube-api-access-ckq6b\") pod \"collect-profiles-29421645-zt9vm\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.510259 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:00 crc kubenswrapper[4716]: I1209 16:45:00.985597 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm"] Dec 09 16:45:00 crc kubenswrapper[4716]: W1209 16:45:00.992131 4716 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0e6403a_0b7b_41b2_ab3b_46e0c6416d63.slice/crio-f06302291620c3ae44d0776a0969d2af30cf19635086141412ce8f8154f97764 WatchSource:0}: Error finding container f06302291620c3ae44d0776a0969d2af30cf19635086141412ce8f8154f97764: Status 404 returned error can't find the container with id f06302291620c3ae44d0776a0969d2af30cf19635086141412ce8f8154f97764 Dec 09 16:45:01 crc kubenswrapper[4716]: I1209 16:45:01.228110 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32e41e7d-e621-42a2-a0e3-1e7d816dbe22" path="/var/lib/kubelet/pods/32e41e7d-e621-42a2-a0e3-1e7d816dbe22/volumes" Dec 09 16:45:01 crc kubenswrapper[4716]: I1209 16:45:01.850615 4716 generic.go:334] "Generic (PLEG): container finished" podID="b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" containerID="9a517bcb3c3a3a25ecdb3321bc833b07a6dac46be7e08ec3c15e4f5b2506ae48" exitCode=0 Dec 09 16:45:01 crc kubenswrapper[4716]: I1209 16:45:01.850748 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" event={"ID":"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63","Type":"ContainerDied","Data":"9a517bcb3c3a3a25ecdb3321bc833b07a6dac46be7e08ec3c15e4f5b2506ae48"} Dec 09 16:45:01 crc kubenswrapper[4716]: I1209 16:45:01.850968 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" event={"ID":"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63","Type":"ContainerStarted","Data":"f06302291620c3ae44d0776a0969d2af30cf19635086141412ce8f8154f97764"} Dec 
09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.294245 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.390731 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ckq6b\" (UniqueName: \"kubernetes.io/projected/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-kube-api-access-ckq6b\") pod \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.392557 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-config-volume\") pod \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.392661 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-secret-volume\") pod \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\" (UID: \"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63\") " Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.393148 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-config-volume" (OuterVolumeSpecName: "config-volume") pod "b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" (UID: "b0e6403a-0b7b-41b2-ab3b-46e0c6416d63"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.393792 4716 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-config-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.398845 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-kube-api-access-ckq6b" (OuterVolumeSpecName: "kube-api-access-ckq6b") pod "b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" (UID: "b0e6403a-0b7b-41b2-ab3b-46e0c6416d63"). InnerVolumeSpecName "kube-api-access-ckq6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.398952 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" (UID: "b0e6403a-0b7b-41b2-ab3b-46e0c6416d63"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.500064 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ckq6b\" (UniqueName: \"kubernetes.io/projected/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-kube-api-access-ckq6b\") on node \"crc\" DevicePath \"\"" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.500116 4716 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b0e6403a-0b7b-41b2-ab3b-46e0c6416d63-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.874905 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" event={"ID":"b0e6403a-0b7b-41b2-ab3b-46e0c6416d63","Type":"ContainerDied","Data":"f06302291620c3ae44d0776a0969d2af30cf19635086141412ce8f8154f97764"} Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.875773 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f06302291620c3ae44d0776a0969d2af30cf19635086141412ce8f8154f97764" Dec 09 16:45:03 crc kubenswrapper[4716]: I1209 16:45:03.875848 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29421645-zt9vm" Dec 09 16:45:04 crc kubenswrapper[4716]: I1209 16:45:04.400761 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm"] Dec 09 16:45:04 crc kubenswrapper[4716]: I1209 16:45:04.415515 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29421600-vwwnm"] Dec 09 16:45:05 crc kubenswrapper[4716]: I1209 16:45:05.227285 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26e6b88c-3937-4dac-a165-9df99ab47f6b" path="/var/lib/kubelet/pods/26e6b88c-3937-4dac-a165-9df99ab47f6b/volumes" Dec 09 16:45:07 crc kubenswrapper[4716]: E1209 16:45:07.217977 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:45:11 crc kubenswrapper[4716]: E1209 16:45:11.218519 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:45:16 crc kubenswrapper[4716]: I1209 16:45:16.387698 4716 scope.go:117] "RemoveContainer" containerID="c4540e925e0308a1a9fb8cb88505378819683762b733002e41a631e438dcff11" Dec 09 16:45:16 crc kubenswrapper[4716]: I1209 16:45:16.418117 4716 scope.go:117] "RemoveContainer" containerID="0c71982a76d9ab1fcf06031eafb4b29ae75c05ad2a6601a4265f228933599b80" Dec 09 16:45:17 crc kubenswrapper[4716]: I1209 16:45:17.922337 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:45:17 crc 
kubenswrapper[4716]: I1209 16:45:17.923464 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:45:17 crc kubenswrapper[4716]: I1209 16:45:17.923603 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:45:17 crc kubenswrapper[4716]: I1209 16:45:17.924841 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"25624bd532411991661615df205e51c08162f3ddb1213bad665cdcf2e9f1cb15"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:45:17 crc kubenswrapper[4716]: I1209 16:45:17.924998 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://25624bd532411991661615df205e51c08162f3ddb1213bad665cdcf2e9f1cb15" gracePeriod=600 Dec 09 16:45:18 crc kubenswrapper[4716]: I1209 16:45:18.093812 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="25624bd532411991661615df205e51c08162f3ddb1213bad665cdcf2e9f1cb15" exitCode=0 Dec 09 16:45:18 crc kubenswrapper[4716]: I1209 16:45:18.093867 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"25624bd532411991661615df205e51c08162f3ddb1213bad665cdcf2e9f1cb15"} Dec 09 16:45:18 crc kubenswrapper[4716]: I1209 16:45:18.093915 4716 scope.go:117] "RemoveContainer" containerID="dda6ec4e8c901cf880782dc4aa7fc34b02c4276f74fb441e2350127e4d167244" Dec 09 16:45:19 crc kubenswrapper[4716]: I1209 16:45:19.106380 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerStarted","Data":"38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"} Dec 09 16:45:22 crc kubenswrapper[4716]: E1209 16:45:22.216464 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:45:25 crc kubenswrapper[4716]: E1209 16:45:25.216607 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:45:33 crc kubenswrapper[4716]: E1209 16:45:33.225766 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:45:40 crc kubenswrapper[4716]: E1209 16:45:40.216046 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:45:48 crc kubenswrapper[4716]: E1209 16:45:48.216184 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:45:52 crc kubenswrapper[4716]: E1209 16:45:52.216178 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:46:00 crc kubenswrapper[4716]: E1209 16:46:00.216373 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:46:05 crc kubenswrapper[4716]: E1209 16:46:05.220059 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:46:14 crc kubenswrapper[4716]: E1209 16:46:14.216168 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:46:20 crc kubenswrapper[4716]: E1209 16:46:20.217164 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:46:28 crc kubenswrapper[4716]: E1209 16:46:28.216102 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:46:31 crc kubenswrapper[4716]: E1209 16:46:31.219887 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:46:42 crc kubenswrapper[4716]: E1209 16:46:42.218103 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:46:46 crc kubenswrapper[4716]: E1209 16:46:46.223220 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:46:54 crc kubenswrapper[4716]: E1209 16:46:54.217149 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:46:58 crc kubenswrapper[4716]: E1209 16:46:58.216240 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:47:09 crc kubenswrapper[4716]: E1209 16:47:09.215920 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:47:09 crc kubenswrapper[4716]: E1209 16:47:09.215931 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:47:22 crc kubenswrapper[4716]: E1209 16:47:22.218133 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:47:23 crc kubenswrapper[4716]: E1209 16:47:23.224377 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:47:34 crc kubenswrapper[4716]: E1209 16:47:34.217567 4716 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:47:36 crc kubenswrapper[4716]: E1209 16:47:36.216211 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:47:47 crc kubenswrapper[4716]: E1209 16:47:47.217167 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:47:47 crc kubenswrapper[4716]: I1209 16:47:47.921975 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:47:47 crc kubenswrapper[4716]: I1209 16:47:47.922035 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:47:49 crc kubenswrapper[4716]: E1209 16:47:49.216451 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:47:58 crc kubenswrapper[4716]: E1209 16:47:58.216153 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:48:00 crc kubenswrapper[4716]: E1209 16:48:00.216113 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:48:11 crc kubenswrapper[4716]: E1209 16:48:11.216190 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:48:14 crc kubenswrapper[4716]: E1209 16:48:14.216667 
4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.592175 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7bg46"] Dec 09 16:48:16 crc kubenswrapper[4716]: E1209 16:48:16.593059 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" containerName="collect-profiles" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.593076 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" containerName="collect-profiles" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.593322 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0e6403a-0b7b-41b2-ab3b-46e0c6416d63" containerName="collect-profiles" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.595184 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.611575 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7bg46"] Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.695918 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/9b24f152-893f-4fc5-a2dc-c47afb40840e-kube-api-access-pj49z\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.696017 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-catalog-content\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.696054 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-utilities\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.798870 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-catalog-content\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.798949 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-utilities\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc 
kubenswrapper[4716]: I1209 16:48:16.799223 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/9b24f152-893f-4fc5-a2dc-c47afb40840e-kube-api-access-pj49z\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.799539 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-catalog-content\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.799576 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-utilities\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.821124 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/9b24f152-893f-4fc5-a2dc-c47afb40840e-kube-api-access-pj49z\") pod \"community-operators-7bg46\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:16 crc kubenswrapper[4716]: I1209 16:48:16.918994 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:17 crc kubenswrapper[4716]: I1209 16:48:17.537862 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7bg46"] Dec 09 16:48:17 crc kubenswrapper[4716]: I1209 16:48:17.921942 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:48:17 crc kubenswrapper[4716]: I1209 16:48:17.922343 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:48:18 crc kubenswrapper[4716]: I1209 16:48:18.070132 4716 generic.go:334] "Generic (PLEG): container finished" podID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerID="a0eb94cd90c7ec74490cd08b0130dbad6049bf580545a82c4fcf639132bc1fb4" exitCode=0 Dec 09 16:48:18 crc kubenswrapper[4716]: I1209 16:48:18.070188 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerDied","Data":"a0eb94cd90c7ec74490cd08b0130dbad6049bf580545a82c4fcf639132bc1fb4"} Dec 09 16:48:18 crc kubenswrapper[4716]: I1209 16:48:18.070226 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" 
event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerStarted","Data":"d437aafc99cb501f196092ce1e1a2517dbd446dc38ac3f99f523c02bd7924f01"} Dec 09 16:48:19 crc kubenswrapper[4716]: I1209 16:48:19.083684 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerStarted","Data":"4af83e0c4d52cfc971a9adeaa2a19f137a081df47b8e70c88fa8ca4e4c93fccc"} Dec 09 16:48:20 crc kubenswrapper[4716]: I1209 16:48:20.098128 4716 generic.go:334] "Generic (PLEG): container finished" podID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerID="4af83e0c4d52cfc971a9adeaa2a19f137a081df47b8e70c88fa8ca4e4c93fccc" exitCode=0 Dec 09 16:48:20 crc kubenswrapper[4716]: I1209 16:48:20.098479 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerDied","Data":"4af83e0c4d52cfc971a9adeaa2a19f137a081df47b8e70c88fa8ca4e4c93fccc"} Dec 09 16:48:21 crc kubenswrapper[4716]: I1209 16:48:21.113834 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerStarted","Data":"ae0b30b98e70b101a04a8ec053626f8a19567d4792b43e648674827c1425923c"} Dec 09 16:48:21 crc kubenswrapper[4716]: I1209 16:48:21.138592 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7bg46" podStartSLOduration=2.482885407 podStartE2EDuration="5.138553914s" podCreationTimestamp="2025-12-09 16:48:16 +0000 UTC" firstStartedPulling="2025-12-09 16:48:18.071669246 +0000 UTC m=+5985.226413234" lastFinishedPulling="2025-12-09 16:48:20.727337753 +0000 UTC m=+5987.882081741" observedRunningTime="2025-12-09 16:48:21.13173019 +0000 UTC m=+5988.286474188" watchObservedRunningTime="2025-12-09 16:48:21.138553914 +0000 UTC m=+5988.293297902" Dec 09 16:48:24 crc kubenswrapper[4716]: E1209 16:48:24.217123 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:48:26 crc kubenswrapper[4716]: I1209 16:48:26.920545 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:26 crc kubenswrapper[4716]: I1209 16:48:26.921015 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:26 crc kubenswrapper[4716]: I1209 16:48:26.972957 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:27 crc kubenswrapper[4716]: I1209 16:48:27.227403 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:27 crc kubenswrapper[4716]: I1209 16:48:27.290776 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7bg46"] Dec 09 16:48:29 crc kubenswrapper[4716]: I1209 16:48:29.198572 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7bg46" 
podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="registry-server" containerID="cri-o://ae0b30b98e70b101a04a8ec053626f8a19567d4792b43e648674827c1425923c" gracePeriod=2 Dec 09 16:48:29 crc kubenswrapper[4716]: E1209 16:48:29.216590 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.221201 4716 generic.go:334] "Generic (PLEG): container finished" podID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerID="ae0b30b98e70b101a04a8ec053626f8a19567d4792b43e648674827c1425923c" exitCode=0 Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.221242 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerDied","Data":"ae0b30b98e70b101a04a8ec053626f8a19567d4792b43e648674827c1425923c"} Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.221766 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7bg46" event={"ID":"9b24f152-893f-4fc5-a2dc-c47afb40840e","Type":"ContainerDied","Data":"d437aafc99cb501f196092ce1e1a2517dbd446dc38ac3f99f523c02bd7924f01"} Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.221779 4716 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d437aafc99cb501f196092ce1e1a2517dbd446dc38ac3f99f523c02bd7924f01" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.293857 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.367638 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-utilities\") pod \"9b24f152-893f-4fc5-a2dc-c47afb40840e\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.369064 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/9b24f152-893f-4fc5-a2dc-c47afb40840e-kube-api-access-pj49z\") pod \"9b24f152-893f-4fc5-a2dc-c47afb40840e\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.369218 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-catalog-content\") pod \"9b24f152-893f-4fc5-a2dc-c47afb40840e\" (UID: \"9b24f152-893f-4fc5-a2dc-c47afb40840e\") " Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.369614 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-utilities" (OuterVolumeSpecName: "utilities") pod "9b24f152-893f-4fc5-a2dc-c47afb40840e" (UID: "9b24f152-893f-4fc5-a2dc-c47afb40840e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.370706 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-utilities\") on node \"crc\" DevicePath \"\"" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.383471 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b24f152-893f-4fc5-a2dc-c47afb40840e-kube-api-access-pj49z" (OuterVolumeSpecName: "kube-api-access-pj49z") pod "9b24f152-893f-4fc5-a2dc-c47afb40840e" (UID: "9b24f152-893f-4fc5-a2dc-c47afb40840e"). InnerVolumeSpecName "kube-api-access-pj49z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.423550 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b24f152-893f-4fc5-a2dc-c47afb40840e" (UID: "9b24f152-893f-4fc5-a2dc-c47afb40840e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.473411 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b24f152-893f-4fc5-a2dc-c47afb40840e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 09 16:48:30 crc kubenswrapper[4716]: I1209 16:48:30.473452 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/9b24f152-893f-4fc5-a2dc-c47afb40840e-kube-api-access-pj49z\") on node \"crc\" DevicePath \"\"" Dec 09 16:48:31 crc kubenswrapper[4716]: I1209 16:48:31.230940 4716 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7bg46" Dec 09 16:48:31 crc kubenswrapper[4716]: I1209 16:48:31.274355 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7bg46"] Dec 09 16:48:31 crc kubenswrapper[4716]: I1209 16:48:31.287030 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7bg46"] Dec 09 16:48:33 crc kubenswrapper[4716]: I1209 16:48:33.232469 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" path="/var/lib/kubelet/pods/9b24f152-893f-4fc5-a2dc-c47afb40840e/volumes" Dec 09 16:48:35 crc kubenswrapper[4716]: E1209 16:48:35.216903 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204" Dec 09 16:48:44 crc kubenswrapper[4716]: E1209 16:48:44.215757 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9" Dec 09 16:48:47 crc kubenswrapper[4716]: I1209 16:48:47.922260 4716 patch_prober.go:28] interesting pod/machine-config-daemon-rdkb2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 09 16:48:47 crc kubenswrapper[4716]: I1209 16:48:47.922842 4716 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 09 16:48:47 crc kubenswrapper[4716]: I1209 16:48:47.922900 4716 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" Dec 09 16:48:47 crc kubenswrapper[4716]: I1209 16:48:47.924002 4716 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"} pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 09 16:48:47 crc kubenswrapper[4716]: I1209 16:48:47.924054 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerName="machine-config-daemon" containerID="cri-o://38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3" gracePeriod=600 Dec 09 16:48:48 crc kubenswrapper[4716]: E1209 16:48:48.069328 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:48:48 crc kubenswrapper[4716]: I1209 16:48:48.440232 4716 generic.go:334] "Generic (PLEG): container finished" podID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3" exitCode=0
Dec 09 16:48:48 crc kubenswrapper[4716]: I1209 16:48:48.440339 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" event={"ID":"d92cd91c-19c2-4865-a522-6d1e3a4cd6a5","Type":"ContainerDied","Data":"38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"}
Dec 09 16:48:48 crc kubenswrapper[4716]: I1209 16:48:48.440411 4716 scope.go:117] "RemoveContainer" containerID="25624bd532411991661615df205e51c08162f3ddb1213bad665cdcf2e9f1cb15"
Dec 09 16:48:48 crc kubenswrapper[4716]: I1209 16:48:48.441883 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:48:48 crc kubenswrapper[4716]: E1209 16:48:48.442344 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:48:50 crc kubenswrapper[4716]: E1209 16:48:50.216610 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:48:55 crc kubenswrapper[4716]: E1209 16:48:55.216924 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:48:59 crc kubenswrapper[4716]: I1209 16:48:59.214471 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:48:59 crc kubenswrapper[4716]: E1209 16:48:59.215429 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:49:01 crc kubenswrapper[4716]: E1209 16:49:01.220122 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:49:07 crc kubenswrapper[4716]: E1209 16:49:07.217975 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:49:10 crc kubenswrapper[4716]: I1209 16:49:10.214157 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:49:10 crc kubenswrapper[4716]: E1209 16:49:10.215313 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:49:13 crc kubenswrapper[4716]: E1209 16:49:13.223564 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:49:19 crc kubenswrapper[4716]: E1209 16:49:19.218310 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:49:22 crc kubenswrapper[4716]: I1209 16:49:22.213978 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:49:22 crc kubenswrapper[4716]: E1209 16:49:22.214436 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:49:26 crc kubenswrapper[4716]: I1209 16:49:26.216254 4716 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 09 16:49:26 crc kubenswrapper[4716]: E1209 16:49:26.342937 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:49:26 crc kubenswrapper[4716]: E1209 16:49:26.343009 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 09 16:49:26 crc kubenswrapper[4716]: E1209 16:49:26.343173 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfm49,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-msshl_openstack(239c4119-944c-46e1-9425-285eeb6e0204): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:49:26 crc kubenswrapper[4716]: E1209 16:49:26.344295 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:49:30 crc kubenswrapper[4716]: E1209 16:49:30.217907 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:49:34 crc kubenswrapper[4716]: I1209 16:49:34.214532 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:49:34 crc kubenswrapper[4716]: E1209 16:49:34.215616 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.505828 4716 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qw9wt"]
Dec 09 16:49:37 crc kubenswrapper[4716]: E1209 16:49:37.506969 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="registry-server"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.506987 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="registry-server"
Dec 09 16:49:37 crc kubenswrapper[4716]: E1209 16:49:37.507032 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="extract-utilities"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.507038 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="extract-utilities"
Dec 09 16:49:37 crc kubenswrapper[4716]: E1209 16:49:37.507053 4716 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="extract-content"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.507059 4716 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="extract-content"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.507318 4716 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b24f152-893f-4fc5-a2dc-c47afb40840e" containerName="registry-server"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.509156 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.537570 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qw9wt"]
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.625206 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-catalog-content\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.625348 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bzlq\" (UniqueName: \"kubernetes.io/projected/b7de5ea4-5d50-42a5-b4c1-608b389eb918-kube-api-access-6bzlq\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.625419 4716 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-utilities\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.727376 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-utilities\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.727530 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-catalog-content\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.727641 4716 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bzlq\" (UniqueName: \"kubernetes.io/projected/b7de5ea4-5d50-42a5-b4c1-608b389eb918-kube-api-access-6bzlq\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.728105 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-utilities\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.728153 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-catalog-content\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.756076 4716 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bzlq\" (UniqueName: \"kubernetes.io/projected/b7de5ea4-5d50-42a5-b4c1-608b389eb918-kube-api-access-6bzlq\") pod \"redhat-marketplace-qw9wt\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") " pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:37 crc kubenswrapper[4716]: I1209 16:49:37.837635 4716 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:38 crc kubenswrapper[4716]: E1209 16:49:38.216890 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:49:38 crc kubenswrapper[4716]: I1209 16:49:38.422903 4716 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qw9wt"]
Dec 09 16:49:38 crc kubenswrapper[4716]: I1209 16:49:38.982670 4716 generic.go:334] "Generic (PLEG): container finished" podID="b7de5ea4-5d50-42a5-b4c1-608b389eb918" containerID="ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d" exitCode=0
Dec 09 16:49:38 crc kubenswrapper[4716]: I1209 16:49:38.982722 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qw9wt" event={"ID":"b7de5ea4-5d50-42a5-b4c1-608b389eb918","Type":"ContainerDied","Data":"ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d"}
Dec 09 16:49:38 crc kubenswrapper[4716]: I1209 16:49:38.982754 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qw9wt" event={"ID":"b7de5ea4-5d50-42a5-b4c1-608b389eb918","Type":"ContainerStarted","Data":"a201433a457a7e61cbe659429599dba5f5a8eba8880bb36301c50cd8f382a391"}
Dec 09 16:49:41 crc kubenswrapper[4716]: I1209 16:49:41.005417 4716 generic.go:334] "Generic (PLEG): container finished" podID="b7de5ea4-5d50-42a5-b4c1-608b389eb918" containerID="5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5" exitCode=0
Dec 09 16:49:41 crc kubenswrapper[4716]: I1209 16:49:41.005512 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qw9wt" event={"ID":"b7de5ea4-5d50-42a5-b4c1-608b389eb918","Type":"ContainerDied","Data":"5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5"}
Dec 09 16:49:42 crc kubenswrapper[4716]: I1209 16:49:42.020600 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qw9wt" event={"ID":"b7de5ea4-5d50-42a5-b4c1-608b389eb918","Type":"ContainerStarted","Data":"6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1"}
Dec 09 16:49:42 crc kubenswrapper[4716]: I1209 16:49:42.045354 4716 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qw9wt" podStartSLOduration=2.268895745 podStartE2EDuration="5.045331012s" podCreationTimestamp="2025-12-09 16:49:37 +0000 UTC" firstStartedPulling="2025-12-09 16:49:38.984903058 +0000 UTC m=+6066.139647046" lastFinishedPulling="2025-12-09 16:49:41.761338325 +0000 UTC m=+6068.916082313" observedRunningTime="2025-12-09 16:49:42.038640952 +0000 UTC m=+6069.193384960" watchObservedRunningTime="2025-12-09 16:49:42.045331012 +0000 UTC m=+6069.200075000"
Dec 09 16:49:44 crc kubenswrapper[4716]: E1209 16:49:44.216583 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:49:47 crc kubenswrapper[4716]: I1209 16:49:47.838010 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:47 crc kubenswrapper[4716]: I1209 16:49:47.838474 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:47 crc kubenswrapper[4716]: I1209 16:49:47.890501 4716 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:48 crc kubenswrapper[4716]: I1209 16:49:48.149230 4716 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:48 crc kubenswrapper[4716]: I1209 16:49:48.266264 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qw9wt"]
Dec 09 16:49:49 crc kubenswrapper[4716]: I1209 16:49:49.213589 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:49:49 crc kubenswrapper[4716]: E1209 16:49:49.214247 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.113475 4716 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qw9wt" podUID="b7de5ea4-5d50-42a5-b4c1-608b389eb918" containerName="registry-server" containerID="cri-o://6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1" gracePeriod=2
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.619353 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.695057 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-utilities\") pod \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") "
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.695316 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bzlq\" (UniqueName: \"kubernetes.io/projected/b7de5ea4-5d50-42a5-b4c1-608b389eb918-kube-api-access-6bzlq\") pod \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") "
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.695401 4716 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-catalog-content\") pod \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\" (UID: \"b7de5ea4-5d50-42a5-b4c1-608b389eb918\") "
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.697333 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-utilities" (OuterVolumeSpecName: "utilities") pod "b7de5ea4-5d50-42a5-b4c1-608b389eb918" (UID: "b7de5ea4-5d50-42a5-b4c1-608b389eb918"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.702178 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7de5ea4-5d50-42a5-b4c1-608b389eb918-kube-api-access-6bzlq" (OuterVolumeSpecName: "kube-api-access-6bzlq") pod "b7de5ea4-5d50-42a5-b4c1-608b389eb918" (UID: "b7de5ea4-5d50-42a5-b4c1-608b389eb918"). InnerVolumeSpecName "kube-api-access-6bzlq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.721869 4716 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7de5ea4-5d50-42a5-b4c1-608b389eb918" (UID: "b7de5ea4-5d50-42a5-b4c1-608b389eb918"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.798739 4716 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-utilities\") on node \"crc\" DevicePath \"\""
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.798776 4716 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bzlq\" (UniqueName: \"kubernetes.io/projected/b7de5ea4-5d50-42a5-b4c1-608b389eb918-kube-api-access-6bzlq\") on node \"crc\" DevicePath \"\""
Dec 09 16:49:50 crc kubenswrapper[4716]: I1209 16:49:50.798787 4716 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7de5ea4-5d50-42a5-b4c1-608b389eb918-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.127941 4716 generic.go:334] "Generic (PLEG): container finished" podID="b7de5ea4-5d50-42a5-b4c1-608b389eb918" containerID="6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1" exitCode=0
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.128003 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qw9wt" event={"ID":"b7de5ea4-5d50-42a5-b4c1-608b389eb918","Type":"ContainerDied","Data":"6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1"}
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.128042 4716 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qw9wt" event={"ID":"b7de5ea4-5d50-42a5-b4c1-608b389eb918","Type":"ContainerDied","Data":"a201433a457a7e61cbe659429599dba5f5a8eba8880bb36301c50cd8f382a391"}
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.128066 4716 scope.go:117] "RemoveContainer" containerID="6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.128265 4716 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qw9wt"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.164240 4716 scope.go:117] "RemoveContainer" containerID="5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.177486 4716 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qw9wt"]
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.189993 4716 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qw9wt"]
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.211349 4716 scope.go:117] "RemoveContainer" containerID="ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.228382 4716 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7de5ea4-5d50-42a5-b4c1-608b389eb918" path="/var/lib/kubelet/pods/b7de5ea4-5d50-42a5-b4c1-608b389eb918/volumes"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.247644 4716 scope.go:117] "RemoveContainer" containerID="6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1"
Dec 09 16:49:51 crc kubenswrapper[4716]: E1209 16:49:51.248216 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1\": container with ID starting with 6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1 not found: ID does not exist" containerID="6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.248264 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1"} err="failed to get container status \"6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1\": rpc error: code = NotFound desc = could not find container \"6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1\": container with ID starting with 6cab97fa76e89b4f1937bfa1cf4e17bd38959c626902753746563a3b1b481da1 not found: ID does not exist"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.248330 4716 scope.go:117] "RemoveContainer" containerID="5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5"
Dec 09 16:49:51 crc kubenswrapper[4716]: E1209 16:49:51.248747 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5\": container with ID starting with 5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5 not found: ID does not exist" containerID="5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.248852 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5"} err="failed to get container status \"5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5\": rpc error: code = NotFound desc = could not find container \"5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5\": container with ID starting with 5ae7543dbc9f3681bd2b65583ea22c196bc50421331896acc4e691184f349bc5 not found: ID does not exist"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.248978 4716 scope.go:117] "RemoveContainer" containerID="ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d"
Dec 09 16:49:51 crc kubenswrapper[4716]: E1209 16:49:51.249324 4716 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d\": container with ID starting with ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d not found: ID does not exist" containerID="ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d"
Dec 09 16:49:51 crc kubenswrapper[4716]: I1209 16:49:51.249370 4716 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d"} err="failed to get container status \"ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d\": rpc error: code = NotFound desc = could not find container \"ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d\": container with ID starting with ff9bf3ec1268d43407f54b926f4a583d86efa1d343c44c5716871b924039970d not found: ID does not exist"
Dec 09 16:49:52 crc kubenswrapper[4716]: E1209 16:49:52.219689 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:49:55 crc kubenswrapper[4716]: E1209 16:49:55.313467 4716 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:49:55 crc kubenswrapper[4716]: E1209 16:49:55.314371 4716 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested"
Dec 09 16:49:55 crc kubenswrapper[4716]: E1209 16:49:55.314633 4716 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh9fh77h667h669h94h589h59dh5bdh5bch9fh68h689h699h559h549h56fh5f5h68fh557h5cch7fh66ch6ch86h56h75h55fh58h5ffhf9h5fdq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lz724,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(29cebb2d-8cdb-49de-a29d-1d02808e46a9): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 09 16:49:55 crc kubenswrapper[4716]: E1209 16:49:55.315885 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:50:01 crc kubenswrapper[4716]: I1209 16:50:01.214239 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:50:01 crc kubenswrapper[4716]: E1209 16:50:01.215071 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:50:04 crc kubenswrapper[4716]: E1209 16:50:04.217099 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:50:09 crc kubenswrapper[4716]: E1209 16:50:09.215853 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:50:12 crc kubenswrapper[4716]: I1209 16:50:12.214350 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:50:12 crc kubenswrapper[4716]: E1209 16:50:12.214996 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:50:15 crc kubenswrapper[4716]: E1209 16:50:15.217817 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:50:22 crc kubenswrapper[4716]: E1209 16:50:22.215998 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"
Dec 09 16:50:24 crc kubenswrapper[4716]: I1209 16:50:24.215326 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:50:24 crc kubenswrapper[4716]: E1209 16:50:24.216606 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:50:29 crc kubenswrapper[4716]: E1209 16:50:29.217943 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-msshl" podUID="239c4119-944c-46e1-9425-285eeb6e0204"
Dec 09 16:50:35 crc kubenswrapper[4716]: I1209 16:50:35.214233 4716 scope.go:117] "RemoveContainer" containerID="38fb42fdc21ab06818d4fd8811cd4e2755a301a7b0bd8235e0fb2d36e44f6ea3"
Dec 09 16:50:35 crc kubenswrapper[4716]: E1209 16:50:35.214985 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-rdkb2_openshift-machine-config-operator(d92cd91c-19c2-4865-a522-6d1e3a4cd6a5)\"" pod="openshift-machine-config-operator/machine-config-daemon-rdkb2" podUID="d92cd91c-19c2-4865-a522-6d1e3a4cd6a5"
Dec 09 16:50:35 crc kubenswrapper[4716]: E1209 16:50:35.216588 4716 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="29cebb2d-8cdb-49de-a29d-1d02808e46a9"